1 // SPDX-License-Identifier: GPL-2.0
3 * Gasket generic driver framework. This file contains the implementation
4 * for the Gasket generic driver framework - the functionality that is common
5 * across Gasket devices.
7 * Copyright (C) 2018 Google, Inc.
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include "gasket_core.h"
14 #include "gasket_interrupt.h"
15 #include "gasket_ioctl.h"
16 #include "gasket_page_table.h"
17 #include "gasket_sysfs.h"
19 #include <linux/capability.h>
20 #include <linux/compiler.h>
21 #include <linux/delay.h>
22 #include <linux/device.h>
24 #include <linux/init.h>
26 #include <linux/pid_namespace.h>
27 #include <linux/printk.h>
28 #include <linux/sched.h>
30 #ifdef GASKET_KERNEL_TRACE_SUPPORT
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/gasket_mmap.h>
34 #define trace_gasket_mmap_exit(x)
35 #define trace_gasket_mmap_entry(x, ...)
39 * "Private" members of gasket_driver_desc.
41 * Contains internal per-device type tracking data, i.e., data not appropriate
42 * as part of the public interface for the generic framework.
44 struct gasket_internal_desc {
45 /* Device-specific-driver-provided configuration information. */
46 const struct gasket_driver_desc *driver_desc;
48 /* Protects access to per-driver data (i.e. this structure). */
51 /* Kernel-internal device class. */
54 /* Instantiated / present devices of this type. */
55 struct gasket_dev *devs[GASKET_DEV_MAX];
/*
 * NOTE(review): one gasket_internal_desc exists per registered driver type
 * (an entry of g_descs[]); devs[] slots are searched under this structure's
 * mutex in gasket_find_dev_slot() and cleared in gasket_free_dev().
 */
58 /* do_map_region() needs to be able to return more than just true/false. */
59 enum do_map_region_status {
60 /* The region was successfully mapped. */
61 DO_MAP_REGION_SUCCESS,
63 /* Attempted to map region and failed. */
64 DO_MAP_REGION_FAILURE,
66 /* The requested region to map was not part of a mappable region. */
67 DO_MAP_REGION_INVALID,
/* Tri-state result consumed only by gasket_mmap()'s per-region loop:
 * INVALID means "skip and try the next region", FAILURE aborts the mmap.
 */
70 /* Global data definitions. */
71 /* Mutex - only for framework-wide data. Other data should be protected by
72 * finer-grained locks.
 */
/* g_mutex serializes driver-type registration and lookup of g_descs[]. */
74 static DEFINE_MUTEX(g_mutex);
76 /* List of all registered device descriptions & their supporting data. */
77 static struct gasket_internal_desc g_descs[GASKET_FRAMEWORK_DESC_MAX];
79 /* Mapping of statuses to human-readable strings. Must end with {0,NULL}. */
80 static const struct gasket_num_name gasket_status_name_table[] = {
81 { GASKET_STATUS_DEAD, "DEAD" },
82 { GASKET_STATUS_ALIVE, "ALIVE" },
83 { GASKET_STATUS_LAMED, "LAMED" },
84 { GASKET_STATUS_DRIVER_EXIT, "DRIVER_EXITING" },
/* NOTE(review): the terminating { 0, NULL } sentinel (required by
 * gasket_num_name_lookup(), used in the ATTR_STATUS sysfs show) is expected
 * immediately below — confirm it is present in the full file.
 */
88 /* Enumeration of the automatic Gasket framework sysfs nodes. */
89 enum gasket_sysfs_attribute_type {
93 ATTR_FRAMEWORK_VERSION,
95 ATTR_HARDWARE_REVISION,
100 ATTR_WRITE_OPEN_COUNT,
/* Values are stored in gasket_sysfs_attribute.data.attr_type and switched
 * on in gasket_sysfs_data_show() to select what each node prints.
 */
105 /* Perform a standard Gasket callback. */
/*
 * Invokes the driver-provided callback with gasket_dev->mutex held and
 * returns the callback's result. Presumably returns 0 when cb_function is
 * NULL (guard not visible in this view) — TODO confirm.
 */
107 check_and_invoke_callback(struct gasket_dev *gasket_dev,
108 int (*cb_function)(struct gasket_dev *))
112 dev_dbg(gasket_dev->dev, "check_and_invoke_callback %p\n",
/* Serialize the callback against other per-device operations. */
115 mutex_lock(&gasket_dev->mutex);
116 ret = cb_function(gasket_dev);
117 mutex_unlock(&gasket_dev->mutex);
122 /* Perform a standard Gasket callback without grabbing gasket_dev->mutex. */
/*
 * Lockless variant: the caller is responsible for holding gasket_dev->mutex
 * (or knowing it is not required, e.g. during probe). Returns the callback's
 * result; presumably 0 when cb_function is NULL — TODO confirm.
 */
124 gasket_check_and_invoke_callback_nolock(struct gasket_dev *gasket_dev,
125 int (*cb_function)(struct gasket_dev *))
130 dev_dbg(gasket_dev->dev,
131 "Invoking device-specific callback.\n");
132 ret = cb_function(gasket_dev);
138 * Return nonzero if the gasket_cdev_info is owned by the current thread group
141 static int gasket_owned_by_current_tgid(struct gasket_cdev_info *info)
143 return (info->ownership.is_owned &&
144 (info->ownership.owner == current->tgid));
148 * Find the next free gasket_internal_dev slot.
150 * Returns the located slot number on success or a negative number on failure.
152 static int gasket_find_dev_slot(struct gasket_internal_desc *internal_desc,
153 const char *kobj_name)
157 mutex_lock(&internal_desc->mutex);
159 /* Search for a previous instance of this device. */
160 for (i = 0; i < GASKET_DEV_MAX; i++) {
161 if (internal_desc->devs[i] &&
162 strcmp(internal_desc->devs[i]->kobj_name, kobj_name) == 0) {
/* A device with the same kobject name is already registered. */
163 pr_err("Duplicate device %s\n", kobj_name);
164 mutex_unlock(&internal_desc->mutex);
169 /* Find a free device slot. */
170 for (i = 0; i < GASKET_DEV_MAX; i++) {
171 if (!internal_desc->devs[i])
175 if (i == GASKET_DEV_MAX) {
176 pr_err("Too many registered devices; max %d\n", GASKET_DEV_MAX);
177 mutex_unlock(&internal_desc->mutex);
181 mutex_unlock(&internal_desc->mutex);
/*
 * NOTE(review): the mutex is released before the returned slot index is
 * used by gasket_alloc_dev() to claim devs[i], so two concurrent probes
 * could race for the same slot — confirm callers are serialized (e.g. by
 * g_mutex or the PCI core) or hold internal_desc->mutex across the claim.
 */
186 * Allocate and initialize a Gasket device structure, add the device to the
189 * Returns 0 if successful, a negative error code otherwise.
191 static int gasket_alloc_dev(struct gasket_internal_desc *internal_desc,
192 struct device *parent, struct gasket_dev **pdev,
193 const char *kobj_name)
196 const struct gasket_driver_desc *driver_desc =
197 internal_desc->driver_desc;
198 struct gasket_dev *gasket_dev;
199 struct gasket_cdev_info *dev_info;
201 pr_debug("Allocating a Gasket device %s.\n", kobj_name);
/* Reject duplicates and find a free devs[] index for this device type. */
205 dev_idx = gasket_find_dev_slot(internal_desc, kobj_name);
209 gasket_dev = *pdev = kzalloc(sizeof(*gasket_dev), GFP_KERNEL);
211 pr_err("no memory for device %s\n", kobj_name);
/* NOTE(review): slot is claimed without holding internal_desc->mutex;
 * see the race note on gasket_find_dev_slot().
 */
214 internal_desc->devs[dev_idx] = gasket_dev;
216 mutex_init(&gasket_dev->mutex);
218 gasket_dev->internal_desc = internal_desc;
219 gasket_dev->dev_idx = dev_idx;
220 snprintf(gasket_dev->kobj_name, GASKET_NAME_MAX, "%s", kobj_name);
/* Hold a reference on the parent for the lifetime of this gasket_dev;
 * dropped by put_device() in gasket_free_dev().
 */
221 gasket_dev->dev = get_device(parent);
222 /* gasket_bar_data is uninitialized. */
223 gasket_dev->num_page_tables = driver_desc->num_page_tables;
224 /* max_page_table_size and *page table are uninit'ed */
225 /* interrupt_data is not initialized. */
226 /* status is 0, or GASKET_STATUS_DEAD */
228 dev_info = &gasket_dev->dev_info;
/* Node name is "<driver>_<index>", e.g. "apex_0". */
229 snprintf(dev_info->name, GASKET_NAME_MAX, "%s_%u", driver_desc->name,
230 gasket_dev->dev_idx);
232 MKDEV(driver_desc->major, driver_desc->minor +
233 gasket_dev->dev_idx);
234 dev_info->device = device_create(internal_desc->class, parent,
235 dev_info->devt, gasket_dev, dev_info->name);
/* NOTE(review): device_create() returns ERR_PTR() on failure but the
 * result is passed straight to dev_dbg() without an IS_ERR() check —
 * confirm and add error handling.
 */
237 dev_dbg(dev_info->device, "Gasket device allocated.\n");
239 /* cdev has not yet been added; cdev_added is 0 */
240 dev_info->gasket_dev_ptr = gasket_dev;
241 /* ownership is all 0, indicating no owner or opens. */
246 /* Free a Gasket device. */
247 static void gasket_free_dev(struct gasket_dev *gasket_dev)
249 struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
/* Release the devs[] slot under the per-driver mutex. */
251 mutex_lock(&internal_desc->mutex);
252 internal_desc->devs[gasket_dev->dev_idx] = NULL;
253 mutex_unlock(&internal_desc->mutex);
/* Drop the parent reference taken in gasket_alloc_dev(). Presumably the
 * gasket_dev itself is kfree()d right after this line — confirm; otherwise
 * the kzalloc() in gasket_alloc_dev() leaks.
 */
254 put_device(gasket_dev->dev);
259 * Maps the specified bar into kernel space.
261 * Returns 0 on success, a negative error code otherwise.
262 * A zero-sized BAR will not be mapped, but is not an error.
264 static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
266 struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
267 const struct gasket_driver_desc *driver_desc =
268 internal_desc->driver_desc;
269 ulong desc_bytes = driver_desc->bar_descriptions[bar_num].size;
275 if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR) {
276 /* not PCI: skip this entry */
280 * pci_resource_start and pci_resource_len return a "resource_size_t",
281 * which is safely castable to ulong (which itself is the arg to
282 * request_mem_region).
284 gasket_dev->bar_data[bar_num].phys_base =
285 (ulong)pci_resource_start(gasket_dev->pci_dev, bar_num);
286 if (!gasket_dev->bar_data[bar_num].phys_base) {
287 dev_err(gasket_dev->dev, "Cannot get BAR%u base address\n",
/* The BAR must be at least as large as the driver descriptor claims. */
292 gasket_dev->bar_data[bar_num].length_bytes =
293 (ulong)pci_resource_len(gasket_dev->pci_dev, bar_num);
294 if (gasket_dev->bar_data[bar_num].length_bytes < desc_bytes) {
295 dev_err(gasket_dev->dev,
296 "PCI BAR %u space is too small: %lu; expected >= %lu\n",
297 bar_num, gasket_dev->bar_data[bar_num].length_bytes,
/* Reserve the physical range so no other driver can claim it. */
302 if (!request_mem_region(gasket_dev->bar_data[bar_num].phys_base,
303 gasket_dev->bar_data[bar_num].length_bytes,
304 gasket_dev->dev_info.name)) {
305 dev_err(gasket_dev->dev,
306 "Cannot get BAR %d memory region %p\n",
307 bar_num, &gasket_dev->pci_dev->resource[bar_num]);
/* NOTE(review): ioremap_nocache() was removed in Linux 5.6 in favor of
 * plain ioremap() — confirm the target kernel version.
 */
311 gasket_dev->bar_data[bar_num].virt_base =
312 ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base,
313 gasket_dev->bar_data[bar_num].length_bytes);
314 if (!gasket_dev->bar_data[bar_num].virt_base) {
315 dev_err(gasket_dev->dev,
316 "Cannot remap BAR %d memory region %p\n",
317 bar_num, &gasket_dev->pci_dev->resource[bar_num]);
/* NOTE(review): dma_set_mask()/dma_set_coherent_mask() can fail; their
 * return values are ignored here — confirm that 64-bit DMA is guaranteed
 * on all supported platforms or handle the error.
 */
322 dma_set_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
323 dma_set_coherent_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
/* Error unwind: undo the ioremap and the memory-region reservation. */
328 iounmap(gasket_dev->bar_data[bar_num].virt_base);
329 release_mem_region(gasket_dev->bar_data[bar_num].phys_base,
330 gasket_dev->bar_data[bar_num].length_bytes);
335 * Releases PCI BAR mapping.
337 * A zero-sized or not-mapped BAR will not be unmapped, but is not an error.
339 static void gasket_unmap_pci_bar(struct gasket_dev *dev, int bar_num)
342 struct gasket_internal_desc *internal_desc = dev->internal_desc;
343 const struct gasket_driver_desc *driver_desc =
344 internal_desc->driver_desc;
/* Nothing to do for unmapped or zero-sized BARs. */
346 if (driver_desc->bar_descriptions[bar_num].size == 0 ||
347 !dev->bar_data[bar_num].virt_base)
350 if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR)
/* Clear virt_base so a repeated call is a harmless no-op. */
353 iounmap(dev->bar_data[bar_num].virt_base);
354 dev->bar_data[bar_num].virt_base = NULL;
356 base = pci_resource_start(dev->pci_dev, bar_num);
358 dev_err(dev->dev, "cannot get PCI BAR%u base address\n",
/* Release the region reserved by request_mem_region() in
 * gasket_map_pci_bar().
 */
363 bytes = pci_resource_len(dev->pci_dev, bar_num);
364 release_mem_region(base, bytes);
368 * Setup PCI memory mapping for the specified device.
370 * Reads the BAR registers and sets up pointers to the device's memory mapped
373 * Returns 0 on success and a negative value otherwise.
375 static int gasket_setup_pci(struct pci_dev *pci_dev,
376 struct gasket_dev *gasket_dev)
378 int i, mapped_bars, ret;
380 for (i = 0; i < GASKET_NUM_BARS; i++) {
381 ret = gasket_map_pci_bar(gasket_dev, i);
/* Error unwind: unmap only the BARs mapped before the failure.
 * (mapped_bars is presumably set to the failing index in the hidden
 * error path above — TODO confirm.)
 */
391 for (i = 0; i < mapped_bars; i++)
392 gasket_unmap_pci_bar(gasket_dev, i);
397 /* Unmaps memory for the specified device. */
398 static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
402 for (i = 0; i < GASKET_NUM_BARS; i++)
403 gasket_unmap_pci_bar(gasket_dev, i);
406 /* Determine the health of the Gasket device. */
/*
 * Aggregates three health sources — the driver's device_status_cb, the
 * interrupt subsystem, and every page table — returning the first
 * non-ALIVE status found, or GASKET_STATUS_ALIVE if all report healthy.
 */
407 static int gasket_get_hw_status(struct gasket_dev *gasket_dev)
411 const struct gasket_driver_desc *driver_desc =
412 gasket_dev->internal_desc->driver_desc;
414 status = gasket_check_and_invoke_callback_nolock(gasket_dev,
415 driver_desc->device_status_cb)
416 if (status != GASKET_STATUS_ALIVE) {
417 dev_dbg(gasket_dev->dev, "Hardware reported status %d.\n",
422 status = gasket_interrupt_system_status(gasket_dev);
423 if (status != GASKET_STATUS_ALIVE) {
424 dev_dbg(gasket_dev->dev,
425 "Interrupt system reported status %d.\n", status);
429 for (i = 0; i < driver_desc->num_page_tables; ++i) {
430 status = gasket_page_table_system_status(gasket_dev->page_table[i]);
431 if (status != GASKET_STATUS_ALIVE) {
432 dev_dbg(gasket_dev->dev,
433 "Page table %d reported status %d.\n",
439 return GASKET_STATUS_ALIVE;
/*
 * Writes "start-end\n" lines for each mappable region of one BAR into the
 * sysfs buffer, bounded by PAGE_SIZE. Returns the number of bytes written
 * (0 for a GASKET_NOMAP BAR). Addresses are reported in legacy mmap space
 * (BAR offset minus legacy_mmap_address_offset).
 */
443 gasket_write_mappable_regions(char *buf,
444 const struct gasket_driver_desc *driver_desc,
449 ssize_t total_written = 0;
450 ulong min_addr, max_addr;
451 struct gasket_bar_desc bar_desc =
452 driver_desc->bar_descriptions[bar_index];
454 if (bar_desc.permissions == GASKET_NOMAP)
457 i < bar_desc.num_mappable_regions && total_written < PAGE_SIZE;
459 min_addr = bar_desc.mappable_regions[i].start -
460 driver_desc->legacy_mmap_address_offset;
461 max_addr = bar_desc.mappable_regions[i].start -
462 driver_desc->legacy_mmap_address_offset +
463 bar_desc.mappable_regions[i].length_bytes;
464 written = scnprintf(buf, PAGE_SIZE - total_written,
465 "0x%08lx-0x%08lx\n", min_addr, max_addr);
466 total_written += written;
/* NOTE(review): buf must be advanced by `written` each iteration
 * (presumably done on a hidden line) or every region overwrites the
 * previous one — TODO confirm.
 */
469 return total_written;
/*
 * Common .show handler for all framework sysfs attributes; dispatches on
 * the attribute's attr_type tag and prints the corresponding datum into
 * buf (at most PAGE_SIZE bytes). Returns bytes written or a negative errno.
 */
472 static ssize_t gasket_sysfs_data_show(struct device *device,
473 struct device_attribute *attr, char *buf)
476 ssize_t current_written = 0;
477 const struct gasket_driver_desc *driver_desc;
478 struct gasket_dev *gasket_dev;
479 struct gasket_sysfs_attribute *gasket_attr;
480 const struct gasket_bar_desc *bar_desc;
481 enum gasket_sysfs_attribute_type sysfs_type;
/* Both lookups take references that must be released before returning. */
483 gasket_dev = gasket_sysfs_get_device_data(device);
485 dev_err(device, "No sysfs mapping found for device\n");
489 gasket_attr = gasket_sysfs_get_attr(device, attr);
491 dev_err(device, "No sysfs attr found for device\n");
492 gasket_sysfs_put_device_data(device, gasket_dev);
496 driver_desc = gasket_dev->internal_desc->driver_desc;
499 (enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
500 switch (sysfs_type) {
501 case ATTR_BAR_OFFSETS:
502 for (i = 0; i < GASKET_NUM_BARS; i++) {
503 bar_desc = &driver_desc->bar_descriptions[i];
504 if (bar_desc->size == 0)
/* NOTE(review): the snprintf() result is presumably assigned to
 * current_written on a hidden line before buf/ret are advanced —
 * TODO confirm; as shown, current_written would stay 0.
 */
507 snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
508 (ulong)bar_desc->base);
509 buf += current_written;
510 ret += current_written;
514 for (i = 0; i < GASKET_NUM_BARS; i++) {
515 bar_desc = &driver_desc->bar_descriptions[i];
516 if (bar_desc->size == 0)
519 snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
520 (ulong)bar_desc->size);
521 buf += current_written;
522 ret += current_written;
525 case ATTR_DRIVER_VERSION:
526 ret = snprintf(buf, PAGE_SIZE, "%s\n",
527 gasket_dev->internal_desc->driver_desc->driver_version);
529 case ATTR_FRAMEWORK_VERSION:
530 ret = snprintf(buf, PAGE_SIZE, "%s\n",
531 GASKET_FRAMEWORK_VERSION);
533 case ATTR_DEVICE_TYPE:
534 ret = snprintf(buf, PAGE_SIZE, "%s\n",
535 gasket_dev->internal_desc->driver_desc->name);
537 case ATTR_HARDWARE_REVISION:
538 ret = snprintf(buf, PAGE_SIZE, "%d\n",
539 gasket_dev->hardware_revision);
541 case ATTR_PCI_ADDRESS:
542 ret = snprintf(buf, PAGE_SIZE, "%s\n", gasket_dev->kobj_name);
545 ret = snprintf(buf, PAGE_SIZE, "%s\n",
546 gasket_num_name_lookup(gasket_dev->status,
547 gasket_status_name_table));
549 case ATTR_IS_DEVICE_OWNED:
550 ret = snprintf(buf, PAGE_SIZE, "%d\n",
551 gasket_dev->dev_info.ownership.is_owned);
553 case ATTR_DEVICE_OWNER:
554 ret = snprintf(buf, PAGE_SIZE, "%d\n",
555 gasket_dev->dev_info.ownership.owner);
557 case ATTR_WRITE_OPEN_COUNT:
558 ret = snprintf(buf, PAGE_SIZE, "%d\n",
559 gasket_dev->dev_info.ownership.write_open_count);
561 case ATTR_RESET_COUNT:
562 ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
564 case ATTR_USER_MEM_RANGES:
565 for (i = 0; i < GASKET_NUM_BARS; ++i) {
567 gasket_write_mappable_regions(buf, driver_desc,
569 buf += current_written;
570 ret += current_written;
574 dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
/* Release the references taken above on every exit path. */
580 gasket_sysfs_put_attr(device, gasket_attr);
581 gasket_sysfs_put_device_data(device, gasket_dev);
585 /* These attributes apply to all Gasket driver instances. */
/* Table is registered per-device; each entry routes its attr_type tag to
 * gasket_sysfs_data_show(). Terminated by GASKET_END_OF_ATTR_ARRAY.
 */
586 static const struct gasket_sysfs_attribute gasket_sysfs_generic_attrs[] = {
587 GASKET_SYSFS_RO(bar_offsets, gasket_sysfs_data_show, ATTR_BAR_OFFSETS),
588 GASKET_SYSFS_RO(bar_sizes, gasket_sysfs_data_show, ATTR_BAR_SIZES),
589 GASKET_SYSFS_RO(driver_version, gasket_sysfs_data_show,
590 ATTR_DRIVER_VERSION),
591 GASKET_SYSFS_RO(framework_version, gasket_sysfs_data_show,
592 ATTR_FRAMEWORK_VERSION),
593 GASKET_SYSFS_RO(device_type, gasket_sysfs_data_show, ATTR_DEVICE_TYPE),
594 GASKET_SYSFS_RO(revision, gasket_sysfs_data_show,
595 ATTR_HARDWARE_REVISION),
596 GASKET_SYSFS_RO(pci_address, gasket_sysfs_data_show, ATTR_PCI_ADDRESS),
597 GASKET_SYSFS_RO(status, gasket_sysfs_data_show, ATTR_STATUS),
598 GASKET_SYSFS_RO(is_device_owned, gasket_sysfs_data_show,
599 ATTR_IS_DEVICE_OWNED),
600 GASKET_SYSFS_RO(device_owner, gasket_sysfs_data_show,
602 GASKET_SYSFS_RO(write_open_count, gasket_sysfs_data_show,
603 ATTR_WRITE_OPEN_COUNT),
604 GASKET_SYSFS_RO(reset_count, gasket_sysfs_data_show, ATTR_RESET_COUNT),
605 GASKET_SYSFS_RO(user_mem_ranges, gasket_sysfs_data_show,
606 ATTR_USER_MEM_RANGES),
607 GASKET_END_OF_ATTR_ARRAY
610 /* Add a char device and related info. */
611 static int gasket_add_cdev(struct gasket_cdev_info *dev_info,
612 const struct file_operations *file_ops,
613 struct module *owner)
617 cdev_init(&dev_info->cdev, file_ops);
618 dev_info->cdev.owner = owner;
619 ret = cdev_add(&dev_info->cdev, dev_info->devt, 1);
621 dev_err(dev_info->gasket_dev_ptr->dev,
622 "cannot add char device [ret=%d]\n", ret);
625 dev_info->cdev_added = 1;
630 /* Disable device operations. */
/*
 * Tears down the user-visible interface: removes the char device (if it was
 * added), marks the device DEAD, shuts down interrupts and resets/cleans
 * every page table. Exported for device-specific drivers to call on remove.
 */
631 void gasket_disable_device(struct gasket_dev *gasket_dev)
633 const struct gasket_driver_desc *driver_desc =
634 gasket_dev->internal_desc->driver_desc;
637 /* Only delete the device if it has been successfully added. */
638 if (gasket_dev->dev_info.cdev_added)
639 cdev_del(&gasket_dev->dev_info.cdev);
641 gasket_dev->status = GASKET_STATUS_DEAD;
643 gasket_interrupt_cleanup(gasket_dev);
645 for (i = 0; i < driver_desc->num_page_tables; ++i) {
646 if (gasket_dev->page_table[i]) {
647 gasket_page_table_reset(gasket_dev->page_table[i]);
648 gasket_page_table_cleanup(gasket_dev->page_table[i]);
652 EXPORT_SYMBOL(gasket_disable_device);
655 * Registered descriptor lookup.
657 * Precondition: Called with g_mutex held (to avoid a race on return).
658 * Returns NULL if no matching device was found.
660 static struct gasket_internal_desc *
661 lookup_internal_desc(struct pci_dev *pci_dev)
665 __must_hold(&g_mutex);
666 for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
667 if (g_descs[i].driver_desc &&
668 g_descs[i].driver_desc->pci_id_table &&
669 pci_match_id(g_descs[i].driver_desc->pci_id_table, pci_dev))
677 * Verifies that the user has permissions to perform the requested mapping and
678 * that the provided descriptor/range is of adequate size to hold the range to
681 static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
682 struct vm_area_struct *vma,
685 int requested_permissions;
686 /* Always allow sysadmin to access. */
687 if (capable(CAP_SYS_ADMIN))
690 /* Never allow non-sysadmins to access to a dead device. */
691 if (gasket_dev->status != GASKET_STATUS_ALIVE) {
692 dev_dbg(gasket_dev->dev, "Device is dead.\n");
696 /* Make sure that no wrong flags are set. */
/* Requested access (read/write/exec) must be a subset of the BAR's
 * allowed permissions.
 */
697 requested_permissions =
698 (vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
699 if (requested_permissions & ~(bar_permissions)) {
700 dev_dbg(gasket_dev->dev,
701 "Attempting to map a region with requested permissions "
702 "0x%x, but region has permissions 0x%x.\n",
703 requested_permissions, bar_permissions);
707 /* Do not allow a non-owner to write. */
708 if ((vma->vm_flags & VM_WRITE) &&
709 !gasket_owned_by_current_tgid(&gasket_dev->dev_info)) {
710 dev_dbg(gasket_dev->dev,
711 "Attempting to mmap a region for write without owning "
720 * Verifies that the input address is within the region allocated to coherent
/* Returns true iff the coherent buffer is mappable (not GASKET_NOMAP)
 * and `address` falls inside [base, base + size).
 */
724 gasket_is_coherent_region(const struct gasket_driver_desc *driver_desc,
727 struct gasket_coherent_buffer_desc coh_buff_desc =
728 driver_desc->coherent_buffer_description;
730 if (coh_buff_desc.permissions != GASKET_NOMAP) {
731 if ((address >= coh_buff_desc.base) &&
732 (address < coh_buff_desc.base + coh_buff_desc.size)) {
/*
 * Maps a physical (legacy-mmap-space) address to the index of the BAR whose
 * [base, base + size) range contains it, skipping GASKET_NOMAP BARs.
 * Returns the BAR index, or presumably a negative errno when no BAR matches
 * (return statements are outside this view) — TODO confirm.
 */
739 static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
743 const struct gasket_driver_desc *driver_desc;
745 driver_desc = gasket_dev->internal_desc->driver_desc;
746 for (i = 0; i < GASKET_NUM_BARS; ++i) {
747 struct gasket_bar_desc bar_desc =
748 driver_desc->bar_descriptions[i];
750 if (bar_desc.permissions != GASKET_NOMAP) {
751 if (phys_addr >= bar_desc.base &&
752 phys_addr < (bar_desc.base + bar_desc.size)) {
757 /* If we haven't found the address by now, it is invalid. */
762 * Sets the actual bounds to map, given the device's mappable region.
764 * Given the device's mappable region, along with the user-requested mapping
765 * start offset and length of the user region, determine how much of this
766 * mappable region can be mapped into the user's region (start/end offsets),
767 * and the physical offset (phys_offset) into the BAR where the mapping should
768 * begin (either the VMA's or region lower bound).
770 * In other words, this calculates the overlap between the VMA
771 * (bar_offset, requested_length) and the given gasket_mappable_region.
773 * Returns true if there's anything to map, and false otherwise.
776 gasket_mm_get_mapping_addrs(const struct gasket_mappable_region *region,
777 ulong bar_offset, ulong requested_length,
778 struct gasket_mappable_region *mappable_region,
781 ulong range_start = region->start;
782 ulong range_length = region->length_bytes;
783 ulong range_end = range_start + range_length;
/* NOTE(review): with `<` (not `<=`), a VMA that ends exactly at
 * range_start falls through to the next branch and yields a zero-length
 * "mappable" result reported as true; also bar_offset + requested_length
 * can wrap for huge values — confirm both are acceptable.
 */
786 if (bar_offset + requested_length < range_start) {
788 * If the requested region is completely below the range,
789 * there is nothing to map.
792 } else if (bar_offset <= range_start) {
793 /* If the bar offset is below this range's start
794 * but the requested length continues into it:
795 * 1) Only map starting from the beginning of this
796 * range's phys. offset, so we don't map unmappable
798 * 2) The length of the virtual memory to not map is the
799 * delta between the bar offset and the
800 * mappable start (and since the mappable start is
801 * bigger, start - req.)
802 * 3) The map length is the minimum of the mappable
803 * requested length (requested_length - virt_offset)
804 * and the actual mappable length of the range.
806 mappable_region->start = range_start;
807 *virt_offset = range_start - bar_offset;
808 mappable_region->length_bytes =
809 min(requested_length - *virt_offset, range_length);
811 } else if (bar_offset > range_start &&
812 bar_offset < range_end) {
814 * If the bar offset is within this range:
815 * 1) Map starting from the bar offset.
816 * 2) Because there is no forbidden memory between the
817 * bar offset and the range start,
819 * 3) The map length is the minimum of the requested
820 * length and the remaining length in the buffer
821 * (range_end - bar_offset)
823 mappable_region->start = bar_offset;
825 mappable_region->length_bytes =
826 min(requested_length, range_end - bar_offset);
831 * If the requested [start] offset is above range_end,
832 * there's nothing to map.
838 * Calculates the offset where the VMA range begins in its containing BAR.
839 * The offset is written into bar_offset on success.
840 * Returns zero on success, anything else on error.
842 static int gasket_mm_vma_bar_offset(const struct gasket_dev *gasket_dev,
843 const struct vm_area_struct *vma,
848 const struct gasket_driver_desc *driver_desc =
849 gasket_dev->internal_desc->driver_desc;
/* Translate the VMA's page offset into legacy mmap address space. */
851 raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
852 driver_desc->legacy_mmap_address_offset;
853 bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
855 dev_err(gasket_dev->dev,
856 "Unable to find matching bar for address 0x%lx\n",
858 trace_gasket_mmap_exit(bar_index);
/* Offset within the BAR = raw address minus the BAR's base. */
862 raw_offset - driver_desc->bar_descriptions[bar_index].base;
/*
 * Unmaps (zaps) the portion of the VMA overlapping map_region.
 * A zero-length region is a no-op. Exported for device-specific drivers.
 */
867 int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
868 struct vm_area_struct *vma,
869 const struct gasket_mappable_region *map_region)
873 struct gasket_mappable_region mappable_region;
876 if (map_region->length_bytes == 0)
879 ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
/* Compute the actual overlap between the VMA and the region. */
883 if (!gasket_mm_get_mapping_addrs(map_region, bar_offset,
884 vma->vm_end - vma->vm_start,
885 &mappable_region, &virt_offset))
889 * The length passed to zap_vma_ptes MUST BE A MULTIPLE OF
890 * PAGE_SIZE! Trust me. I have the scars.
892 * Next multiple of y: ceil_div(x, y) * y
894 zap_vma_ptes(vma, vma->vm_start + virt_offset,
895 DIV_ROUND_UP(mappable_region.length_bytes, PAGE_SIZE) *
899 EXPORT_SYMBOL(gasket_mm_unmap_region);
901 /* Maps a virtual address + range to a physical offset of a BAR. */
902 static enum do_map_region_status
903 do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
904 struct gasket_mappable_region *mappable_region)
906 /* Maximum size of a single call to io_remap_pfn_range. */
907 /* I pulled this number out of thin air. */
908 const ulong max_chunk_size = 64 * 1024 * 1024;
909 ulong chunk_size, mapped_bytes = 0;
911 const struct gasket_driver_desc *driver_desc =
912 gasket_dev->internal_desc->driver_desc;
914 ulong bar_offset, virt_offset;
915 struct gasket_mappable_region region_to_map;
916 ulong phys_offset, map_length;
917 ulong virt_base, phys_base;
920 ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
922 return DO_MAP_REGION_INVALID;
924 if (!gasket_mm_get_mapping_addrs(mappable_region, bar_offset,
925 vma->vm_end - vma->vm_start,
926 ®ion_to_map, &virt_offset))
927 return DO_MAP_REGION_INVALID;
928 phys_offset = region_to_map.start;
929 map_length = region_to_map.length_bytes;
931 virt_base = vma->vm_start + virt_offset;
933 gasket_get_bar_index(gasket_dev,
934 (vma->vm_pgoff << PAGE_SHIFT) +
935 driver_desc->legacy_mmap_address_offset);
938 return DO_MAP_REGION_INVALID;
940 phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
941 while (mapped_bytes < map_length) {
943 * io_remap_pfn_range can take a while, so we chunk its
944 * calls and call cond_resched between each.
946 chunk_size = min(max_chunk_size, map_length - mapped_bytes);
949 ret = io_remap_pfn_range(vma, virt_base + mapped_bytes,
950 (phys_base + mapped_bytes) >>
951 PAGE_SHIFT, chunk_size,
954 dev_err(gasket_dev->dev,
955 "Error remapping PFN range.\n");
958 mapped_bytes += chunk_size;
961 return DO_MAP_REGION_SUCCESS;
964 /* Unmap the partial chunk we mapped. */
965 mappable_region->length_bytes = mapped_bytes;
966 if (gasket_mm_unmap_region(gasket_dev, vma, mappable_region))
967 dev_err(gasket_dev->dev,
968 "Error unmapping partial region 0x%lx (0x%lx bytes)\n",
970 (ulong)mapped_bytes);
972 return DO_MAP_REGION_FAILURE;
975 /* Map a region of coherent memory. */
/*
 * Maps the device's pre-allocated coherent buffer into the caller's VMA.
 * Validates the requested size against the buffer and checks the coherent
 * buffer's permission mask before remapping; records the resulting
 * user-virtual <-> dma_address association for later page-table use.
 */
976 static int gasket_mmap_coherent(struct gasket_dev *gasket_dev,
977 struct vm_area_struct *vma)
979 const struct gasket_driver_desc *driver_desc =
980 gasket_dev->internal_desc->driver_desc;
981 const ulong requested_length = vma->vm_end - vma->vm_start;
985 if (requested_length == 0 || requested_length >
986 gasket_dev->coherent_buffer.length_bytes) {
987 trace_gasket_mmap_exit(-EINVAL);
991 permissions = driver_desc->coherent_buffer_description.permissions;
992 if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
993 dev_err(gasket_dev->dev, "Permission checking failed.\n");
994 trace_gasket_mmap_exit(-EPERM);
/* Coherent buffer must be mapped uncached. */
998 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1000 ret = remap_pfn_range(vma, vma->vm_start,
1001 (gasket_dev->coherent_buffer.phys_base) >>
1002 PAGE_SHIFT, requested_length, vma->vm_page_prot);
1004 dev_err(gasket_dev->dev, "Error remapping PFN range err=%d.\n",
1006 trace_gasket_mmap_exit(ret);
1010 /* Record the user virtual to dma_address mapping that was
1011 * created by the kernel.
1013 gasket_set_user_virt(gasket_dev, requested_length,
1014 gasket_dev->coherent_buffer.phys_base,
1019 /* Map a device's BARs into user space. */
/*
 * .mmap handler: resolves the VMA's offset to either the coherent buffer
 * or a BAR, permission-checks it, then maps every overlapping mappable
 * region. Succeeds if at least one region was mapped; otherwise unwinds.
 */
1020 static int gasket_mmap(struct file *filp, struct vm_area_struct *vma)
1024 int has_mapped_anything = 0;
1026 ulong raw_offset, vma_size;
1027 bool is_coherent_region;
1028 const struct gasket_driver_desc *driver_desc;
1029 struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
1030 const struct gasket_bar_desc *bar_desc;
1031 struct gasket_mappable_region *map_regions = NULL;
1032 int num_map_regions = 0;
1033 enum do_map_region_status map_status;
1035 driver_desc = gasket_dev->internal_desc->driver_desc;
1037 if (vma->vm_start & ~PAGE_MASK) {
1038 dev_err(gasket_dev->dev,
1039 "Base address not page-aligned: 0x%lx\n",
1041 trace_gasket_mmap_exit(-EINVAL);
1045 /* Calculate the offset of this range into physical mem. */
1046 raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
1047 driver_desc->legacy_mmap_address_offset;
1048 vma_size = vma->vm_end - vma->vm_start;
1049 trace_gasket_mmap_entry(gasket_dev->dev_info.name, raw_offset,
1053 * Check if the raw offset is within a bar region. If not, check if it
1054 * is a coherent region.
1056 bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
1057 is_coherent_region = gasket_is_coherent_region(driver_desc, raw_offset);
1058 if (bar_index < 0 && !is_coherent_region) {
1059 dev_err(gasket_dev->dev,
1060 "Unable to find matching bar for address 0x%lx\n",
1062 trace_gasket_mmap_exit(bar_index);
/* NOTE(review): `bar_index > 0` fails to detect a double match when
 * the address hits BAR 0 — looks like this should be `>= 0`; confirm.
 */
1065 if (bar_index > 0 && is_coherent_region) {
1066 dev_err(gasket_dev->dev,
1067 "double matching bar and coherent buffers for address "
1070 trace_gasket_mmap_exit(bar_index);
1074 vma->vm_private_data = gasket_dev;
1076 if (is_coherent_region)
1077 return gasket_mmap_coherent(gasket_dev, vma);
1079 /* Everything in the rest of this function is for normal BAR mapping. */
1082 * Subtract the base of the bar from the raw offset to get the
1083 * memory location within the bar to map.
1085 bar_desc = &driver_desc->bar_descriptions[bar_index];
1086 permissions = bar_desc->permissions;
1087 if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
1088 dev_err(gasket_dev->dev, "Permission checking failed.\n");
1089 trace_gasket_mmap_exit(-EPERM);
/* Either the driver supplies mappable regions via callback, or the
 * static regions from the BAR descriptor are copied and used.
 */
1093 if (driver_desc->get_mappable_regions_cb) {
1094 ret = driver_desc->get_mappable_regions_cb(gasket_dev,
1101 if (!gasket_mmap_has_permissions(gasket_dev, vma,
1102 bar_desc->permissions)) {
1103 dev_err(gasket_dev->dev,
1104 "Permission checking failed.\n");
1105 trace_gasket_mmap_exit(-EPERM);
1108 num_map_regions = bar_desc->num_mappable_regions;
1109 map_regions = kcalloc(num_map_regions,
1110 sizeof(*bar_desc->mappable_regions),
1113 memcpy(map_regions, bar_desc->mappable_regions,
1115 sizeof(*bar_desc->mappable_regions));
/* kcalloc/callback failure is funneled through this common check. */
1119 if (!map_regions || num_map_regions == 0) {
1120 dev_err(gasket_dev->dev, "No mappable regions returned!\n");
1124 /* Marks the VMA's pages as uncacheable. */
1125 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1126 for (i = 0; i < num_map_regions; i++) {
1127 map_status = do_map_region(gasket_dev, vma, &map_regions[i]);
1128 /* Try the next region if this one was not mappable. */
1129 if (map_status == DO_MAP_REGION_INVALID)
1131 if (map_status == DO_MAP_REGION_FAILURE) {
1136 has_mapped_anything = 1;
1141 /* If we could not map any memory, the request was invalid. */
1142 if (!has_mapped_anything) {
1143 dev_err(gasket_dev->dev,
1144 "Map request did not contain a valid region.\n");
1145 trace_gasket_mmap_exit(-EINVAL);
1149 trace_gasket_mmap_exit(0);
1153 /* Need to unmap any mapped ranges. */
1154 num_map_regions = i;
1155 for (i = 0; i < num_map_regions; i++)
1156 if (gasket_mm_unmap_region(gasket_dev, vma,
1157 &bar_desc->mappable_regions[i]))
1158 dev_err(gasket_dev->dev, "Error unmapping range %d.\n",
1166 * Open the char device file.
1168 * If the open is for writing, and the device is not owned, this process becomes
1169 * the owner. If the open is for writing and the device is already owned by
1170 * some other process, it is an error. If this process is the owner, increment
1173 * Returns 0 if successful, a negative error number otherwise.
1175 static int gasket_open(struct inode *inode, struct file *filp)
1178 struct gasket_dev *gasket_dev;
1179 const struct gasket_driver_desc *driver_desc;
1180 struct gasket_ownership *ownership;
1181 char task_name[TASK_COMM_LEN];
1182 struct gasket_cdev_info *dev_info =
1183 container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
/* Root is judged within the opener's PID namespace, not globally. */
1184 struct pid_namespace *pid_ns = task_active_pid_ns(current);
1185 bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
1187 gasket_dev = dev_info->gasket_dev_ptr;
1188 driver_desc = gasket_dev->internal_desc->driver_desc;
1189 ownership = &dev_info->ownership;
1190 get_task_comm(task_name, current);
1191 filp->private_data = gasket_dev;
1194 dev_dbg(gasket_dev->dev,
1195 "Attempting to open with tgid %u (%s) (f_mode: 0%03o, "
1196 "fmode_write: %d is_root: %u)\n",
1197 current->tgid, task_name, filp->f_mode,
1198 (filp->f_mode & FMODE_WRITE), is_root);
1200 /* Always allow non-writing accesses. */
1201 if (!(filp->f_mode & FMODE_WRITE)) {
1202 dev_dbg(gasket_dev->dev, "Allowing read-only opening.\n");
/* All ownership state below is protected by gasket_dev->mutex. */
1206 mutex_lock(&gasket_dev->mutex);
1208 dev_dbg(gasket_dev->dev,
1209 "Current owner open count (owning tgid %u): %d.\n",
1210 ownership->owner, ownership->write_open_count);
1212 /* Opening a node owned by another TGID is an error (unless root) */
1213 if (ownership->is_owned && ownership->owner != current->tgid &&
1215 dev_err(gasket_dev->dev,
1216 "Process %u is opening a node held by %u.\n",
1217 current->tgid, ownership->owner);
1218 mutex_unlock(&gasket_dev->mutex);
1222 /* If the node is not owned, assign it to the current TGID. */
1223 if (!ownership->is_owned) {
/* Give the device-specific driver a chance to veto the first open. */
1224 ret = gasket_check_and_invoke_callback_nolock(gasket_dev,
1225 driver_desc->device_open_cb);
1227 dev_err(gasket_dev->dev,
1228 "Error in device open cb: %d\n", ret);
1229 mutex_unlock(&gasket_dev->mutex);
1232 ownership->is_owned = 1;
1233 ownership->owner = current->tgid;
1234 dev_dbg(gasket_dev->dev, "Device owner is now tgid %u\n",
1238 ownership->write_open_count++;
1240 dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
1241 ownership->owner, ownership->write_open_count);
1243 mutex_unlock(&gasket_dev->mutex);
/*
 * Called on a close of the device file.  If this process is the owner,
 * decrement the open count.  On last close by the owner, free up buffers and
 * eventfd contexts, and release ownership.
 *
 * Returns 0 if successful, a negative error number otherwise.
 */
static int gasket_release(struct inode *inode, struct file *file)
	struct gasket_dev *gasket_dev;
	struct gasket_ownership *ownership;
	const struct gasket_driver_desc *driver_desc;
	char task_name[TASK_COMM_LEN];
	struct gasket_cdev_info *dev_info =
		container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);

	gasket_dev = dev_info->gasket_dev_ptr;
	driver_desc = gasket_dev->internal_desc->driver_desc;
	ownership = &dev_info->ownership;
	get_task_comm(task_name, current);
	mutex_lock(&gasket_dev->mutex);

	dev_dbg(gasket_dev->dev,
		"Releasing device node. Call origin: tgid %u (%s) "
		"(f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
		current->tgid, task_name, file->f_mode,
		(file->f_mode & FMODE_WRITE), is_root);
	dev_dbg(gasket_dev->dev, "Current open count (owning tgid %u): %d\n",
		ownership->owner, ownership->write_open_count);

	/* Only writable opens took ownership, so only they release it. */
	if (file->f_mode & FMODE_WRITE) {
		ownership->write_open_count--;
		if (ownership->write_open_count == 0) {
			dev_dbg(gasket_dev->dev, "Device is now free\n");
			ownership->is_owned = 0;
			ownership->owner = 0;

			/* Forces chip reset before we unmap the page tables. */
			/*
			 * NOTE(review): device_reset_cb is invoked here
			 * without a NULL check, while gasket_reset_nolock()
			 * guards the same callback — confirm every registered
			 * driver supplies it.
			 */
			driver_desc->device_reset_cb(gasket_dev);

			/* Tear down all translations and coherent buffers. */
			for (i = 0; i < driver_desc->num_page_tables; ++i) {
				gasket_page_table_unmap_all(gasket_dev->page_table[i]);
				gasket_page_table_garbage_collect(gasket_dev->page_table[i]);
				gasket_free_coherent_memory_all(gasket_dev, i);

			/* Closes device, enters power save. */
			gasket_check_and_invoke_callback_nolock(gasket_dev,
						driver_desc->device_close_cb);

	dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
		ownership->owner, ownership->write_open_count);
	mutex_unlock(&gasket_dev->mutex);
/*
 * Gasket ioctl dispatch function.
 *
 * Check if the ioctl is a generic ioctl.  If not, pass the ioctl to the
 * ioctl_handler_cb registered in the driver description.
 * If the ioctl is a generic ioctl, pass it to gasket_ioctl_handler.
 */
static long gasket_ioctl(struct file *filp, uint cmd, ulong arg)
	struct gasket_dev *gasket_dev;
	const struct gasket_driver_desc *driver_desc;
	/* Userspace pointer argument; handlers copy through it as needed. */
	void __user *argp = (void __user *)arg;

	/* private_data was stashed by gasket_open(). */
	gasket_dev = (struct gasket_dev *)filp->private_data;
	driver_desc = gasket_dev->internal_desc->driver_desc;
	dev_dbg(gasket_dev->dev,
		"Unable to find device descriptor for file %s\n",
		d_path(&filp->f_path, path, 256));

	if (!gasket_is_supported_ioctl(cmd)) {
		/*
		 * The ioctl handler is not a standard Gasket callback, since
		 * it requires different arguments.  This means we can't use
		 * check_and_invoke_callback.
		 */
		if (driver_desc->ioctl_handler_cb)
			return driver_desc->ioctl_handler_cb(filp, cmd, argp);

		dev_dbg(gasket_dev->dev, "Received unknown ioctl 0x%x\n", cmd);

	/* Framework-defined ioctl: handle it generically. */
	return gasket_handle_ioctl(filp, cmd, argp);
/* File operations for all Gasket devices. */
static const struct file_operations gasket_file_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,	/* char device: seeking is meaningless */
	.mmap = gasket_mmap,
	.open = gasket_open,
	.release = gasket_release,
	.unlocked_ioctl = gasket_ioctl,
/* Perform final init and marks the device as active. */
int gasket_enable_device(struct gasket_dev *gasket_dev)
	const struct gasket_driver_desc *driver_desc =
		gasket_dev->internal_desc->driver_desc;

	/* Wire up the interrupt framework as described by the driver. */
	ret = gasket_interrupt_init(gasket_dev, driver_desc->name,
				    driver_desc->interrupt_type,
				    driver_desc->interrupts,
				    driver_desc->num_interrupts,
				    driver_desc->interrupt_pack_width,
				    driver_desc->interrupt_bar_index,
				    driver_desc->wire_interrupt_offsets);
		dev_err(gasket_dev->dev,
			"Critical failure to allocate interrupts: %d\n", ret);
		gasket_interrupt_cleanup(gasket_dev);

	/* Initialize each page table the driver declared. */
	for (tbl_idx = 0; tbl_idx < driver_desc->num_page_tables; tbl_idx++) {
		dev_dbg(gasket_dev->dev, "Initializing page table %d.\n",
		ret = gasket_page_table_init(&gasket_dev->page_table[tbl_idx],
			&gasket_dev->bar_data[driver_desc->page_table_bar_index],
			&driver_desc->page_table_configs[tbl_idx],
			gasket_dev->pci_dev);
			dev_err(gasket_dev->dev,
				"Couldn't init page table %d: %d\n",
		/*
		 * Make sure that the page table is clear and set to simple
		 */
		gasket_page_table_reset(gasket_dev->page_table[tbl_idx]);

	/*
	 * hardware_revision_cb returns a positive integer (the rev) if
	 */
	ret = check_and_invoke_callback(gasket_dev,
					driver_desc->hardware_revision_cb);
		dev_err(gasket_dev->dev,
			"Error getting hardware revision: %d\n", ret);
	gasket_dev->hardware_revision = ret;

	/* device_status_cb returns a device status, not an error code. */
	gasket_dev->status = gasket_get_hw_status(gasket_dev);
	if (gasket_dev->status == GASKET_STATUS_DEAD)
		dev_err(gasket_dev->dev, "Device reported as unhealthy.\n");

	/* Expose the char device node; from here userspace can open us. */
	ret = gasket_add_cdev(&gasket_dev->dev_info, &gasket_file_ops,
			      driver_desc->module);
EXPORT_SYMBOL(gasket_enable_device);
/*
 * Add PCI gasket device.
 *
 * Called by Gasket device probe function.
 * Allocates device metadata and maps device memory.  The device driver must
 * call gasket_enable_device after driver init is complete to place the device
 * in active use.
 */
int gasket_pci_add_device(struct pci_dev *pci_dev,
			  struct gasket_dev **gasket_devp)
	const char *kobj_name = dev_name(&pci_dev->dev);
	struct gasket_internal_desc *internal_desc;
	struct gasket_dev *gasket_dev;
	const struct gasket_driver_desc *driver_desc;
	struct device *parent;

	pr_debug("add PCI device %s\n", kobj_name);

	/* Match this PCI device to a registered gasket driver type. */
	mutex_lock(&g_mutex);
	internal_desc = lookup_internal_desc(pci_dev);
	mutex_unlock(&g_mutex);
	if (!internal_desc) {
		dev_err(&pci_dev->dev,
			"PCI add device called for unknown driver type\n");

	driver_desc = internal_desc->driver_desc;

	parent = &pci_dev->dev;
	ret = gasket_alloc_dev(internal_desc, parent, &gasket_dev, kobj_name);
	gasket_dev->pci_dev = pci_dev;
	if (IS_ERR_OR_NULL(gasket_dev->dev_info.device)) {
		pr_err("Cannot create %s device %s [ret = %ld]\n",
		       driver_desc->name, gasket_dev->dev_info.name,
		       PTR_ERR(gasket_dev->dev_info.device));

	/* Enable the PCI function and map BARs. */
	ret = gasket_setup_pci(pci_dev, gasket_dev);

	ret = gasket_sysfs_create_mapping(gasket_dev->dev_info.device,

	/*
	 * Once we've created the mapping structures successfully, attempt to
	 * create a symlink to the pci directory of this object.
	 */
	ret = sysfs_create_link(&gasket_dev->dev_info.device->kobj,
				&pci_dev->dev.kobj, dev_name(&pci_dev->dev));
		dev_err(gasket_dev->dev,
			"Cannot create sysfs pci link: %d\n", ret);
	ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
					  gasket_sysfs_generic_attrs);

	*gasket_devp = gasket_dev;

	/* Error unwind: undo each setup step in reverse order. */
	gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
	gasket_cleanup_pci(gasket_dev);
	device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
	gasket_free_dev(gasket_dev);
EXPORT_SYMBOL(gasket_pci_add_device);
/* Remove a PCI gasket device. */
void gasket_pci_remove_device(struct pci_dev *pci_dev)
	struct gasket_internal_desc *internal_desc;
	struct gasket_dev *gasket_dev = NULL;
	const struct gasket_driver_desc *driver_desc;

	/* Find the device desc. */
	mutex_lock(&g_mutex);
	internal_desc = lookup_internal_desc(pci_dev);
	if (!internal_desc) {
		/* Not one of ours; nothing to tear down. */
		mutex_unlock(&g_mutex);
	mutex_unlock(&g_mutex);

	driver_desc = internal_desc->driver_desc;

	/* Now find the specific device */
	mutex_lock(&internal_desc->mutex);
	for (i = 0; i < GASKET_DEV_MAX; i++) {
		if (internal_desc->devs[i] &&
		    internal_desc->devs[i]->pci_dev == pci_dev) {
			gasket_dev = internal_desc->devs[i];
	mutex_unlock(&internal_desc->mutex);

	dev_dbg(gasket_dev->dev, "remove %s PCI gasket device\n",
		internal_desc->driver_desc->name);

	/* Release PCI resources, sysfs entries, then the device itself. */
	gasket_cleanup_pci(gasket_dev);

	gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
	device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
	gasket_free_dev(gasket_dev);
EXPORT_SYMBOL(gasket_pci_remove_device);
/*
 * Lookup a name by number in a num_name table.
 * @num: Number to lookup.
 * @table: Array of num_name structures, the table for the lookup.
 *
 * Description: Searches for num in the table.  If found, the
 *		corresponding name is returned; otherwise NULL.
 *
 * The table must have a NULL name pointer at the end.
 */
const char *gasket_num_name_lookup(uint num,
				   const struct gasket_num_name *table)
	/* Walk entries until the NULL-name sentinel or a matching number. */
	while (table[i].snn_name) {
		if (num == table[i].snn_num)

	/* Either the match found above, or NULL from the sentinel entry. */
	return table[i].snn_name;
EXPORT_SYMBOL(gasket_num_name_lookup);
/* Reset the device, locked wrapper around gasket_reset_nolock(). */
int gasket_reset(struct gasket_dev *gasket_dev)
	mutex_lock(&gasket_dev->mutex);
	ret = gasket_reset_nolock(gasket_dev);
	mutex_unlock(&gasket_dev->mutex);
EXPORT_SYMBOL(gasket_reset);
/*
 * Reset the device without taking the device mutex; the caller must hold it.
 * Invokes the driver's reset callback, then reinitializes page tables and
 * interrupts, and refreshes the cached device health status.
 */
int gasket_reset_nolock(struct gasket_dev *gasket_dev)
	const struct gasket_driver_desc *driver_desc;

	driver_desc = gasket_dev->internal_desc->driver_desc;
	/* A driver without a reset callback simply can't be reset. */
	if (!driver_desc->device_reset_cb)

	ret = driver_desc->device_reset_cb(gasket_dev);
		dev_dbg(gasket_dev->dev, "Device reset cb returned %d.\n",

	/* Reinitialize the page tables and interrupt framework. */
	for (i = 0; i < driver_desc->num_page_tables; ++i)
		gasket_page_table_reset(gasket_dev->page_table[i]);

	ret = gasket_interrupt_reinit(gasket_dev);
		dev_dbg(gasket_dev->dev, "Unable to reinit interrupts: %d.\n",

	/* Get current device health. */
	gasket_dev->status = gasket_get_hw_status(gasket_dev);
	if (gasket_dev->status == GASKET_STATUS_DEAD) {
		dev_dbg(gasket_dev->dev, "Device reported as dead.\n");
EXPORT_SYMBOL(gasket_reset_nolock);
/* Return the driver's ioctl-permissions callback (may be NULL). */
gasket_ioctl_permissions_cb_t
gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev)
	return gasket_dev->internal_desc->driver_desc->ioctl_permissions_cb;
EXPORT_SYMBOL(gasket_get_ioctl_permissions_cb);
/* Get the driver structure for a given gasket_dev.
 * @dev: pointer to gasket_dev, implementing the requested driver.
 */
const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev)
	return dev->internal_desc->driver_desc;
/* Get the device structure for a given gasket_dev.
 * @dev: pointer to gasket_dev, implementing the requested driver.
 */
struct device *gasket_get_device(struct gasket_dev *dev)
/*
 * Poll a device register until a bit pattern appears, or give up.
 * @gasket_dev: Device struct.
 * @bar: BAR to read from.
 * @offset: Register offset.
 * @mask: Register mask.
 * @val: Expected value (after masking).
 * @max_retries: Number of poll attempts before giving up.
 * @delay_ms: Delay between attempts, in milliseconds (presumably slept
 *	between reads — confirm against the elided loop body); the effective
 *	timeout is max_retries * delay_ms, as logged below.
 *
 * Description: Repeatedly reads the register and returns once
 * (reg & mask) == val, rescheduling between attempts.
 */
int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
				u64 offset, u64 mask, u64 val,
				uint max_retries, u64 delay_ms)
	while (retries < max_retries) {
		tmp = gasket_dev_read_64(gasket_dev, bar, offset);
		/* Done as soon as the masked bits match the expected value. */
		if ((tmp & mask) == val)

	dev_dbg(gasket_dev->dev, "%s timeout: reg %llx timeout (%llu ms)\n",
		__func__, offset, max_retries * delay_ms);
EXPORT_SYMBOL(gasket_wait_with_reschedule);
/* See gasket_core.h for description. */
int gasket_register_device(const struct gasket_driver_desc *driver_desc)
	struct gasket_internal_desc *internal;

	pr_debug("Loading %s driver version %s\n", driver_desc->name,
		 driver_desc->driver_version);
	/* Check for duplicates and find a free slot. */
	mutex_lock(&g_mutex);

	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (g_descs[i].driver_desc == driver_desc) {
			pr_err("%s driver already loaded/registered\n",
			mutex_unlock(&g_mutex);

	/* This and the above loop could be combined, but this reads easier. */
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (!g_descs[i].driver_desc) {
			g_descs[i].driver_desc = driver_desc;
	mutex_unlock(&g_mutex);

	if (desc_idx == -1) {
		pr_err("too many drivers loaded, max %d\n",
		       GASKET_FRAMEWORK_DESC_MAX);

	/* Claim the slot and initialize its per-driver state. */
	internal = &g_descs[desc_idx];
	mutex_init(&internal->mutex);
	memset(internal->devs, 0, sizeof(struct gasket_dev *) * GASKET_DEV_MAX);
	class_create(driver_desc->module, driver_desc->name);
	if (IS_ERR(internal->class)) {
		pr_err("Cannot register %s class [ret=%ld]\n",
		       driver_desc->name, PTR_ERR(internal->class));
		ret = PTR_ERR(internal->class);
		goto unregister_gasket_driver;

	/* Reserve the driver's char-device major/minor range. */
	ret = register_chrdev_region(MKDEV(driver_desc->major,
					   driver_desc->minor), GASKET_DEV_MAX,
		pr_err("cannot register %s char driver [ret=%d]\n",
		       driver_desc->name, ret);

	/* Error unwind: destroy the class, then release the slot. */
	class_destroy(internal->class);

unregister_gasket_driver:
	mutex_lock(&g_mutex);
	g_descs[desc_idx].driver_desc = NULL;
	mutex_unlock(&g_mutex);
EXPORT_SYMBOL(gasket_register_device);
/* See gasket_core.h for description. */
void gasket_unregister_device(const struct gasket_driver_desc *driver_desc)
	struct gasket_internal_desc *internal_desc = NULL;

	/* Locate the slot this driver was registered into. */
	mutex_lock(&g_mutex);
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (g_descs[i].driver_desc == driver_desc) {
			internal_desc = &g_descs[i];

	if (!internal_desc) {
		mutex_unlock(&g_mutex);
		pr_err("request to unregister unknown desc: %s, %d:%d\n",
		       driver_desc->name, driver_desc->major,
		       driver_desc->minor);

	/* Release the char-device region and the device class. */
	unregister_chrdev_region(MKDEV(driver_desc->major, driver_desc->minor),
	class_destroy(internal_desc->class);

	/* Finally, effectively "remove" the driver. */
	g_descs[desc_idx].driver_desc = NULL;
	mutex_unlock(&g_mutex);

	pr_debug("removed %s driver\n", driver_desc->name);
EXPORT_SYMBOL(gasket_unregister_device);
/* Module entry point: clear the driver slot table and init sysfs support. */
static int __init gasket_init(void)
	pr_debug("%s\n", __func__);
	mutex_lock(&g_mutex);
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		g_descs[i].driver_desc = NULL;
		mutex_init(&g_descs[i].mutex);

	gasket_sysfs_init();

	mutex_unlock(&g_mutex);
/* Module exit point; per-driver teardown happens in unregister_device. */
static void __exit gasket_exit(void)
	pr_debug("%s\n", __func__);
/* Standard module metadata and init/exit registration. */
MODULE_DESCRIPTION("Google Gasket driver framework");
MODULE_VERSION(GASKET_FRAMEWORK_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rob Springer <rspringer@google.com>");
module_init(gasket_init);
module_exit(gasket_exit);