GNU Linux-libre 4.19.264-gnu1
[releases.git] / drivers / staging / gasket / gasket_core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Gasket generic driver framework. This file contains the implementation
4  * for the Gasket generic driver framework - the functionality that is common
5  * across Gasket devices.
6  *
7  * Copyright (C) 2018 Google, Inc.
8  */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include "gasket_core.h"
13
14 #include "gasket_interrupt.h"
15 #include "gasket_ioctl.h"
16 #include "gasket_page_table.h"
17 #include "gasket_sysfs.h"
18
19 #include <linux/capability.h>
20 #include <linux/compiler.h>
21 #include <linux/delay.h>
22 #include <linux/device.h>
23 #include <linux/fs.h>
24 #include <linux/init.h>
25 #include <linux/of.h>
26 #include <linux/pid_namespace.h>
27 #include <linux/printk.h>
28 #include <linux/sched.h>
29
#ifdef GASKET_KERNEL_TRACE_SUPPORT
#define CREATE_TRACE_POINTS
#include <trace/events/gasket_mmap.h>
#else
/* Tracing disabled: compile the mmap trace hooks out as no-ops. */
#define trace_gasket_mmap_exit(x)
#define trace_gasket_mmap_entry(x, ...)
#endif
37
/*
 * "Private" members of gasket_driver_desc.
 *
 * Contains internal per-device type tracking data, i.e., data not appropriate
 * as part of the public interface for the generic framework.
 */
struct gasket_internal_desc {
	/* Device-specific-driver-provided configuration information. */
	const struct gasket_driver_desc *driver_desc;

	/* Protects access to per-driver data (i.e. this structure). */
	struct mutex mutex;

	/* Kernel-internal device class. */
	struct class *class;

	/* Instantiated / present devices of this type; slots are claimed
	 * and released under "mutex" (see gasket_find_dev_slot()).
	 */
	struct gasket_dev *devs[GASKET_DEV_MAX];
};
57
/* do_map_region() needs be able to return more than just true/false. */
enum do_map_region_status {
	/* The region was successfully mapped. */
	DO_MAP_REGION_SUCCESS,

	/* Attempted to map region and failed. */
	DO_MAP_REGION_FAILURE,

	/* The requested region to map was not part of a mappable region. */
	DO_MAP_REGION_INVALID,
};
69
/* Global data definitions. */
/* Mutex - only for framework-wide data. Other data should be protected by
 * finer-grained locks.
 */
static DEFINE_MUTEX(g_mutex);

/* List of all registered device descriptions & their supporting data.
 * Slots are matched/claimed under g_mutex (see lookup_internal_desc()).
 */
static struct gasket_internal_desc g_descs[GASKET_FRAMEWORK_DESC_MAX];

/* Mapping of statuses to human-readable strings. Must end with {0,NULL}.
 * Consumed by gasket_num_name_lookup() in the "status" sysfs show handler.
 */
static const struct gasket_num_name gasket_status_name_table[] = {
	{ GASKET_STATUS_DEAD, "DEAD" },
	{ GASKET_STATUS_ALIVE, "ALIVE" },
	{ GASKET_STATUS_LAMED, "LAMED" },
	{ GASKET_STATUS_DRIVER_EXIT, "DRIVER_EXITING" },
	{ 0, NULL },
};
87
/* Enumeration of the automatic Gasket framework sysfs nodes.
 * Each value is stored as the attr_type of an entry in
 * gasket_sysfs_generic_attrs and dispatched in gasket_sysfs_data_show().
 */
enum gasket_sysfs_attribute_type {
	ATTR_BAR_OFFSETS,
	ATTR_BAR_SIZES,
	ATTR_DRIVER_VERSION,
	ATTR_FRAMEWORK_VERSION,
	ATTR_DEVICE_TYPE,
	ATTR_HARDWARE_REVISION,
	ATTR_PCI_ADDRESS,
	ATTR_STATUS,
	ATTR_IS_DEVICE_OWNED,
	ATTR_DEVICE_OWNER,
	ATTR_WRITE_OPEN_COUNT,
	ATTR_RESET_COUNT,
	ATTR_USER_MEM_RANGES
};
104
105 /* Perform a standard Gasket callback. */
106 static inline int
107 check_and_invoke_callback(struct gasket_dev *gasket_dev,
108                           int (*cb_function)(struct gasket_dev *))
109 {
110         int ret = 0;
111
112         dev_dbg(gasket_dev->dev, "check_and_invoke_callback %p\n",
113                 cb_function);
114         if (cb_function) {
115                 mutex_lock(&gasket_dev->mutex);
116                 ret = cb_function(gasket_dev);
117                 mutex_unlock(&gasket_dev->mutex);
118         }
119         return ret;
120 }
121
122 /* Perform a standard Gasket callback without grabbing gasket_dev->mutex. */
123 static inline int
124 gasket_check_and_invoke_callback_nolock(struct gasket_dev *gasket_dev,
125                                         int (*cb_function)(struct gasket_dev *))
126 {
127         int ret = 0;
128
129         if (cb_function) {
130                 dev_dbg(gasket_dev->dev,
131                         "Invoking device-specific callback.\n");
132                 ret = cb_function(gasket_dev);
133         }
134         return ret;
135 }
136
137 /*
138  * Return nonzero if the gasket_cdev_info is owned by the current thread group
139  * ID.
140  */
141 static int gasket_owned_by_current_tgid(struct gasket_cdev_info *info)
142 {
143         return (info->ownership.is_owned &&
144                 (info->ownership.owner == current->tgid));
145 }
146
147 /*
148  * Find the next free gasket_internal_dev slot.
149  *
150  * Returns the located slot number on success or a negative number on failure.
151  */
152 static int gasket_find_dev_slot(struct gasket_internal_desc *internal_desc,
153                                 const char *kobj_name)
154 {
155         int i;
156
157         mutex_lock(&internal_desc->mutex);
158
159         /* Search for a previous instance of this device. */
160         for (i = 0; i < GASKET_DEV_MAX; i++) {
161                 if (internal_desc->devs[i] &&
162                     strcmp(internal_desc->devs[i]->kobj_name, kobj_name) == 0) {
163                         pr_err("Duplicate device %s\n", kobj_name);
164                         mutex_unlock(&internal_desc->mutex);
165                         return -EBUSY;
166                 }
167         }
168
169         /* Find a free device slot. */
170         for (i = 0; i < GASKET_DEV_MAX; i++) {
171                 if (!internal_desc->devs[i])
172                         break;
173         }
174
175         if (i == GASKET_DEV_MAX) {
176                 pr_err("Too many registered devices; max %d\n", GASKET_DEV_MAX);
177                 mutex_unlock(&internal_desc->mutex);
178                 return -EBUSY;
179         }
180
181         mutex_unlock(&internal_desc->mutex);
182         return i;
183 }
184
185 /*
186  * Allocate and initialize a Gasket device structure, add the device to the
187  * device list.
188  *
189  * Returns 0 if successful, a negative error code otherwise.
190  */
191 static int gasket_alloc_dev(struct gasket_internal_desc *internal_desc,
192                             struct device *parent, struct gasket_dev **pdev,
193                             const char *kobj_name)
194 {
195         int dev_idx;
196         const struct gasket_driver_desc *driver_desc =
197                 internal_desc->driver_desc;
198         struct gasket_dev *gasket_dev;
199         struct gasket_cdev_info *dev_info;
200
201         pr_debug("Allocating a Gasket device %s.\n", kobj_name);
202
203         *pdev = NULL;
204
205         dev_idx = gasket_find_dev_slot(internal_desc, kobj_name);
206         if (dev_idx < 0)
207                 return dev_idx;
208
209         gasket_dev = *pdev = kzalloc(sizeof(*gasket_dev), GFP_KERNEL);
210         if (!gasket_dev) {
211                 pr_err("no memory for device %s\n", kobj_name);
212                 return -ENOMEM;
213         }
214         internal_desc->devs[dev_idx] = gasket_dev;
215
216         mutex_init(&gasket_dev->mutex);
217
218         gasket_dev->internal_desc = internal_desc;
219         gasket_dev->dev_idx = dev_idx;
220         snprintf(gasket_dev->kobj_name, GASKET_NAME_MAX, "%s", kobj_name);
221         gasket_dev->dev = get_device(parent);
222         /* gasket_bar_data is uninitialized. */
223         gasket_dev->num_page_tables = driver_desc->num_page_tables;
224         /* max_page_table_size and *page table are uninit'ed */
225         /* interrupt_data is not initialized. */
226         /* status is 0, or GASKET_STATUS_DEAD */
227
228         dev_info = &gasket_dev->dev_info;
229         snprintf(dev_info->name, GASKET_NAME_MAX, "%s_%u", driver_desc->name,
230                  gasket_dev->dev_idx);
231         dev_info->devt =
232                 MKDEV(driver_desc->major, driver_desc->minor +
233                       gasket_dev->dev_idx);
234         dev_info->device = device_create(internal_desc->class, parent,
235                 dev_info->devt, gasket_dev, dev_info->name);
236
237         dev_dbg(dev_info->device, "Gasket device allocated.\n");
238
239         /* cdev has not yet been added; cdev_added is 0 */
240         dev_info->gasket_dev_ptr = gasket_dev;
241         /* ownership is all 0, indicating no owner or opens. */
242
243         return 0;
244 }
245
246 /* Free a Gasket device. */
247 static void gasket_free_dev(struct gasket_dev *gasket_dev)
248 {
249         struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
250
251         mutex_lock(&internal_desc->mutex);
252         internal_desc->devs[gasket_dev->dev_idx] = NULL;
253         mutex_unlock(&internal_desc->mutex);
254         put_device(gasket_dev->dev);
255         kfree(gasket_dev);
256 }
257
258 /*
259  * Maps the specified bar into kernel space.
260  *
261  * Returns 0 on success, a negative error code otherwise.
262  * A zero-sized BAR will not be mapped, but is not an error.
263  */
264 static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
265 {
266         struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
267         const struct gasket_driver_desc *driver_desc =
268                 internal_desc->driver_desc;
269         ulong desc_bytes = driver_desc->bar_descriptions[bar_num].size;
270         int ret;
271
272         if (desc_bytes == 0)
273                 return 0;
274
275         if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR) {
276                 /* not PCI: skip this entry */
277                 return 0;
278         }
279         /*
280          * pci_resource_start and pci_resource_len return a "resource_size_t",
281          * which is safely castable to ulong (which itself is the arg to
282          * request_mem_region).
283          */
284         gasket_dev->bar_data[bar_num].phys_base =
285                 (ulong)pci_resource_start(gasket_dev->pci_dev, bar_num);
286         if (!gasket_dev->bar_data[bar_num].phys_base) {
287                 dev_err(gasket_dev->dev, "Cannot get BAR%u base address\n",
288                         bar_num);
289                 return -EINVAL;
290         }
291
292         gasket_dev->bar_data[bar_num].length_bytes =
293                 (ulong)pci_resource_len(gasket_dev->pci_dev, bar_num);
294         if (gasket_dev->bar_data[bar_num].length_bytes < desc_bytes) {
295                 dev_err(gasket_dev->dev,
296                         "PCI BAR %u space is too small: %lu; expected >= %lu\n",
297                         bar_num, gasket_dev->bar_data[bar_num].length_bytes,
298                         desc_bytes);
299                 return -ENOMEM;
300         }
301
302         if (!request_mem_region(gasket_dev->bar_data[bar_num].phys_base,
303                                 gasket_dev->bar_data[bar_num].length_bytes,
304                                 gasket_dev->dev_info.name)) {
305                 dev_err(gasket_dev->dev,
306                         "Cannot get BAR %d memory region %p\n",
307                         bar_num, &gasket_dev->pci_dev->resource[bar_num]);
308                 return -EINVAL;
309         }
310
311         gasket_dev->bar_data[bar_num].virt_base =
312                 ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base,
313                                 gasket_dev->bar_data[bar_num].length_bytes);
314         if (!gasket_dev->bar_data[bar_num].virt_base) {
315                 dev_err(gasket_dev->dev,
316                         "Cannot remap BAR %d memory region %p\n",
317                         bar_num, &gasket_dev->pci_dev->resource[bar_num]);
318                 ret = -ENOMEM;
319                 goto fail;
320         }
321
322         dma_set_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
323         dma_set_coherent_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
324
325         return 0;
326
327 fail:
328         iounmap(gasket_dev->bar_data[bar_num].virt_base);
329         release_mem_region(gasket_dev->bar_data[bar_num].phys_base,
330                            gasket_dev->bar_data[bar_num].length_bytes);
331         return ret;
332 }
333
334 /*
335  * Releases PCI BAR mapping.
336  *
337  * A zero-sized or not-mapped BAR will not be unmapped, but is not an error.
338  */
339 static void gasket_unmap_pci_bar(struct gasket_dev *dev, int bar_num)
340 {
341         ulong base, bytes;
342         struct gasket_internal_desc *internal_desc = dev->internal_desc;
343         const struct gasket_driver_desc *driver_desc =
344                 internal_desc->driver_desc;
345
346         if (driver_desc->bar_descriptions[bar_num].size == 0 ||
347             !dev->bar_data[bar_num].virt_base)
348                 return;
349
350         if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR)
351                 return;
352
353         iounmap(dev->bar_data[bar_num].virt_base);
354         dev->bar_data[bar_num].virt_base = NULL;
355
356         base = pci_resource_start(dev->pci_dev, bar_num);
357         if (!base) {
358                 dev_err(dev->dev, "cannot get PCI BAR%u base address\n",
359                         bar_num);
360                 return;
361         }
362
363         bytes = pci_resource_len(dev->pci_dev, bar_num);
364         release_mem_region(base, bytes);
365 }
366
367 /*
368  * Setup PCI memory mapping for the specified device.
369  *
370  * Reads the BAR registers and sets up pointers to the device's memory mapped
371  * IO space.
372  *
373  * Returns 0 on success and a negative value otherwise.
374  */
375 static int gasket_setup_pci(struct pci_dev *pci_dev,
376                             struct gasket_dev *gasket_dev)
377 {
378         int i, mapped_bars, ret;
379
380         for (i = 0; i < GASKET_NUM_BARS; i++) {
381                 ret = gasket_map_pci_bar(gasket_dev, i);
382                 if (ret) {
383                         mapped_bars = i;
384                         goto fail;
385                 }
386         }
387
388         return 0;
389
390 fail:
391         for (i = 0; i < mapped_bars; i++)
392                 gasket_unmap_pci_bar(gasket_dev, i);
393
394         return -ENOMEM;
395 }
396
397 /* Unmaps memory for the specified device. */
398 static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
399 {
400         int i;
401
402         for (i = 0; i < GASKET_NUM_BARS; i++)
403                 gasket_unmap_pci_bar(gasket_dev, i);
404 }
405
406 /* Determine the health of the Gasket device. */
407 static int gasket_get_hw_status(struct gasket_dev *gasket_dev)
408 {
409         int status;
410         int i;
411         const struct gasket_driver_desc *driver_desc =
412                 gasket_dev->internal_desc->driver_desc;
413
414         status = gasket_check_and_invoke_callback_nolock(gasket_dev,
415                                                          driver_desc->device_status_cb);
416         if (status != GASKET_STATUS_ALIVE) {
417                 dev_dbg(gasket_dev->dev, "Hardware reported status %d.\n",
418                         status);
419                 return status;
420         }
421
422         status = gasket_interrupt_system_status(gasket_dev);
423         if (status != GASKET_STATUS_ALIVE) {
424                 dev_dbg(gasket_dev->dev,
425                         "Interrupt system reported status %d.\n", status);
426                 return status;
427         }
428
429         for (i = 0; i < driver_desc->num_page_tables; ++i) {
430                 status = gasket_page_table_system_status(gasket_dev->page_table[i]);
431                 if (status != GASKET_STATUS_ALIVE) {
432                         dev_dbg(gasket_dev->dev,
433                                 "Page table %d reported status %d.\n",
434                                 i, status);
435                         return status;
436                 }
437         }
438
439         return GASKET_STATUS_ALIVE;
440 }
441
442 static ssize_t
443 gasket_write_mappable_regions(char *buf,
444                               const struct gasket_driver_desc *driver_desc,
445                               int bar_index)
446 {
447         int i;
448         ssize_t written;
449         ssize_t total_written = 0;
450         ulong min_addr, max_addr;
451         struct gasket_bar_desc bar_desc =
452                 driver_desc->bar_descriptions[bar_index];
453
454         if (bar_desc.permissions == GASKET_NOMAP)
455                 return 0;
456         for (i = 0;
457              i < bar_desc.num_mappable_regions && total_written < PAGE_SIZE;
458              i++) {
459                 min_addr = bar_desc.mappable_regions[i].start -
460                            driver_desc->legacy_mmap_address_offset;
461                 max_addr = bar_desc.mappable_regions[i].start -
462                            driver_desc->legacy_mmap_address_offset +
463                            bar_desc.mappable_regions[i].length_bytes;
464                 written = scnprintf(buf, PAGE_SIZE - total_written,
465                                     "0x%08lx-0x%08lx\n", min_addr, max_addr);
466                 total_written += written;
467                 buf += written;
468         }
469         return total_written;
470 }
471
472 static ssize_t gasket_sysfs_data_show(struct device *device,
473                                       struct device_attribute *attr, char *buf)
474 {
475         int i, ret = 0;
476         ssize_t current_written = 0;
477         const struct gasket_driver_desc *driver_desc;
478         struct gasket_dev *gasket_dev;
479         struct gasket_sysfs_attribute *gasket_attr;
480         const struct gasket_bar_desc *bar_desc;
481         enum gasket_sysfs_attribute_type sysfs_type;
482
483         gasket_dev = gasket_sysfs_get_device_data(device);
484         if (!gasket_dev) {
485                 dev_err(device, "No sysfs mapping found for device\n");
486                 return 0;
487         }
488
489         gasket_attr = gasket_sysfs_get_attr(device, attr);
490         if (!gasket_attr) {
491                 dev_err(device, "No sysfs attr found for device\n");
492                 gasket_sysfs_put_device_data(device, gasket_dev);
493                 return 0;
494         }
495
496         driver_desc = gasket_dev->internal_desc->driver_desc;
497
498         sysfs_type =
499                 (enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
500         switch (sysfs_type) {
501         case ATTR_BAR_OFFSETS:
502                 for (i = 0; i < GASKET_NUM_BARS; i++) {
503                         bar_desc = &driver_desc->bar_descriptions[i];
504                         if (bar_desc->size == 0)
505                                 continue;
506                         current_written =
507                                 snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
508                                          (ulong)bar_desc->base);
509                         buf += current_written;
510                         ret += current_written;
511                 }
512                 break;
513         case ATTR_BAR_SIZES:
514                 for (i = 0; i < GASKET_NUM_BARS; i++) {
515                         bar_desc = &driver_desc->bar_descriptions[i];
516                         if (bar_desc->size == 0)
517                                 continue;
518                         current_written =
519                                 snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
520                                          (ulong)bar_desc->size);
521                         buf += current_written;
522                         ret += current_written;
523                 }
524                 break;
525         case ATTR_DRIVER_VERSION:
526                 ret = snprintf(buf, PAGE_SIZE, "%s\n",
527                                gasket_dev->internal_desc->driver_desc->driver_version);
528                 break;
529         case ATTR_FRAMEWORK_VERSION:
530                 ret = snprintf(buf, PAGE_SIZE, "%s\n",
531                                GASKET_FRAMEWORK_VERSION);
532                 break;
533         case ATTR_DEVICE_TYPE:
534                 ret = snprintf(buf, PAGE_SIZE, "%s\n",
535                                gasket_dev->internal_desc->driver_desc->name);
536                 break;
537         case ATTR_HARDWARE_REVISION:
538                 ret = snprintf(buf, PAGE_SIZE, "%d\n",
539                                gasket_dev->hardware_revision);
540                 break;
541         case ATTR_PCI_ADDRESS:
542                 ret = snprintf(buf, PAGE_SIZE, "%s\n", gasket_dev->kobj_name);
543                 break;
544         case ATTR_STATUS:
545                 ret = snprintf(buf, PAGE_SIZE, "%s\n",
546                                gasket_num_name_lookup(gasket_dev->status,
547                                                       gasket_status_name_table));
548                 break;
549         case ATTR_IS_DEVICE_OWNED:
550                 ret = snprintf(buf, PAGE_SIZE, "%d\n",
551                                gasket_dev->dev_info.ownership.is_owned);
552                 break;
553         case ATTR_DEVICE_OWNER:
554                 ret = snprintf(buf, PAGE_SIZE, "%d\n",
555                                gasket_dev->dev_info.ownership.owner);
556                 break;
557         case ATTR_WRITE_OPEN_COUNT:
558                 ret = snprintf(buf, PAGE_SIZE, "%d\n",
559                                gasket_dev->dev_info.ownership.write_open_count);
560                 break;
561         case ATTR_RESET_COUNT:
562                 ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
563                 break;
564         case ATTR_USER_MEM_RANGES:
565                 for (i = 0; i < GASKET_NUM_BARS; ++i) {
566                         current_written =
567                                 gasket_write_mappable_regions(buf, driver_desc,
568                                                               i);
569                         buf += current_written;
570                         ret += current_written;
571                 }
572                 break;
573         default:
574                 dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
575                         attr->attr.name);
576                 ret = 0;
577                 break;
578         }
579
580         gasket_sysfs_put_attr(device, gasket_attr);
581         gasket_sysfs_put_device_data(device, gasket_dev);
582         return ret;
583 }
584
/* These attributes apply to all Gasket driver instances.
 * Each entry's third argument is an enum gasket_sysfs_attribute_type tag
 * that gasket_sysfs_data_show() dispatches on.
 */
static const struct gasket_sysfs_attribute gasket_sysfs_generic_attrs[] = {
	GASKET_SYSFS_RO(bar_offsets, gasket_sysfs_data_show, ATTR_BAR_OFFSETS),
	GASKET_SYSFS_RO(bar_sizes, gasket_sysfs_data_show, ATTR_BAR_SIZES),
	GASKET_SYSFS_RO(driver_version, gasket_sysfs_data_show,
			ATTR_DRIVER_VERSION),
	GASKET_SYSFS_RO(framework_version, gasket_sysfs_data_show,
			ATTR_FRAMEWORK_VERSION),
	GASKET_SYSFS_RO(device_type, gasket_sysfs_data_show, ATTR_DEVICE_TYPE),
	GASKET_SYSFS_RO(revision, gasket_sysfs_data_show,
			ATTR_HARDWARE_REVISION),
	GASKET_SYSFS_RO(pci_address, gasket_sysfs_data_show, ATTR_PCI_ADDRESS),
	GASKET_SYSFS_RO(status, gasket_sysfs_data_show, ATTR_STATUS),
	GASKET_SYSFS_RO(is_device_owned, gasket_sysfs_data_show,
			ATTR_IS_DEVICE_OWNED),
	GASKET_SYSFS_RO(device_owner, gasket_sysfs_data_show,
			ATTR_DEVICE_OWNER),
	GASKET_SYSFS_RO(write_open_count, gasket_sysfs_data_show,
			ATTR_WRITE_OPEN_COUNT),
	GASKET_SYSFS_RO(reset_count, gasket_sysfs_data_show, ATTR_RESET_COUNT),
	GASKET_SYSFS_RO(user_mem_ranges, gasket_sysfs_data_show,
			ATTR_USER_MEM_RANGES),
	GASKET_END_OF_ATTR_ARRAY
};
609
610 /* Add a char device and related info. */
611 static int gasket_add_cdev(struct gasket_cdev_info *dev_info,
612                            const struct file_operations *file_ops,
613                            struct module *owner)
614 {
615         int ret;
616
617         cdev_init(&dev_info->cdev, file_ops);
618         dev_info->cdev.owner = owner;
619         ret = cdev_add(&dev_info->cdev, dev_info->devt, 1);
620         if (ret) {
621                 dev_err(dev_info->gasket_dev_ptr->dev,
622                         "cannot add char device [ret=%d]\n", ret);
623                 return ret;
624         }
625         dev_info->cdev_added = 1;
626
627         return 0;
628 }
629
630 /* Disable device operations. */
631 void gasket_disable_device(struct gasket_dev *gasket_dev)
632 {
633         const struct gasket_driver_desc *driver_desc =
634                 gasket_dev->internal_desc->driver_desc;
635         int i;
636
637         /* Only delete the device if it has been successfully added. */
638         if (gasket_dev->dev_info.cdev_added)
639                 cdev_del(&gasket_dev->dev_info.cdev);
640
641         gasket_dev->status = GASKET_STATUS_DEAD;
642
643         gasket_interrupt_cleanup(gasket_dev);
644
645         for (i = 0; i < driver_desc->num_page_tables; ++i) {
646                 if (gasket_dev->page_table[i]) {
647                         gasket_page_table_reset(gasket_dev->page_table[i]);
648                         gasket_page_table_cleanup(gasket_dev->page_table[i]);
649                 }
650         }
651 }
652 EXPORT_SYMBOL(gasket_disable_device);
653
654 /*
655  * Registered descriptor lookup.
656  *
657  * Precondition: Called with g_mutex held (to avoid a race on return).
658  * Returns NULL if no matching device was found.
659  */
660 static struct gasket_internal_desc *
661 lookup_internal_desc(struct pci_dev *pci_dev)
662 {
663         int i;
664
665         __must_hold(&g_mutex);
666         for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
667                 if (g_descs[i].driver_desc &&
668                     g_descs[i].driver_desc->pci_id_table &&
669                     pci_match_id(g_descs[i].driver_desc->pci_id_table, pci_dev))
670                         return &g_descs[i];
671         }
672
673         return NULL;
674 }
675
676 /*
677  * Verifies that the user has permissions to perform the requested mapping and
678  * that the provided descriptor/range is of adequate size to hold the range to
679  * be mapped.
680  */
681 static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
682                                         struct vm_area_struct *vma,
683                                         int bar_permissions)
684 {
685         int requested_permissions;
686         /* Always allow sysadmin to access. */
687         if (capable(CAP_SYS_ADMIN))
688                 return true;
689
690         /* Never allow non-sysadmins to access to a dead device. */
691         if (gasket_dev->status != GASKET_STATUS_ALIVE) {
692                 dev_dbg(gasket_dev->dev, "Device is dead.\n");
693                 return false;
694         }
695
696         /* Make sure that no wrong flags are set. */
697         requested_permissions =
698                 (vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
699         if (requested_permissions & ~(bar_permissions)) {
700                 dev_dbg(gasket_dev->dev,
701                         "Attempting to map a region with requested permissions "
702                         "0x%x, but region has permissions 0x%x.\n",
703                         requested_permissions, bar_permissions);
704                 return false;
705         }
706
707         /* Do not allow a non-owner to write. */
708         if ((vma->vm_flags & VM_WRITE) &&
709             !gasket_owned_by_current_tgid(&gasket_dev->dev_info)) {
710                 dev_dbg(gasket_dev->dev,
711                         "Attempting to mmap a region for write without owning "
712                         "device.\n");
713                 return false;
714         }
715
716         return true;
717 }
718
719 /*
720  * Verifies that the input address is within the region allocated to coherent
721  * buffer.
722  */
723 static bool
724 gasket_is_coherent_region(const struct gasket_driver_desc *driver_desc,
725                           ulong address)
726 {
727         struct gasket_coherent_buffer_desc coh_buff_desc =
728                 driver_desc->coherent_buffer_description;
729
730         if (coh_buff_desc.permissions != GASKET_NOMAP) {
731                 if ((address >= coh_buff_desc.base) &&
732                     (address < coh_buff_desc.base + coh_buff_desc.size)) {
733                         return true;
734                 }
735         }
736         return false;
737 }
738
739 static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
740                                 ulong phys_addr)
741 {
742         int i;
743         const struct gasket_driver_desc *driver_desc;
744
745         driver_desc = gasket_dev->internal_desc->driver_desc;
746         for (i = 0; i < GASKET_NUM_BARS; ++i) {
747                 struct gasket_bar_desc bar_desc =
748                         driver_desc->bar_descriptions[i];
749
750                 if (bar_desc.permissions != GASKET_NOMAP) {
751                         if (phys_addr >= bar_desc.base &&
752                             phys_addr < (bar_desc.base + bar_desc.size)) {
753                                 return i;
754                         }
755                 }
756         }
757         /* If we haven't found the address by now, it is invalid. */
758         return -EINVAL;
759 }
760
/*
 * Sets the actual bounds to map, given the device's mappable region.
 *
 * Given the device's mappable region, along with the user-requested mapping
 * start offset and length of the user region, determine how much of this
 * mappable region can be mapped into the user's region (start/end offsets),
 * and the physical offset (phys_offset) into the BAR where the mapping should
 * begin (either the VMA's or region lower bound).
 *
 * In other words, this calculates the overlap between the VMA
 * (bar_offset, requested_length) and the given gasket_mappable_region.
 *
 * Returns true if there's anything to map, and false otherwise.
 */
static bool
gasket_mm_get_mapping_addrs(const struct gasket_mappable_region *region,
			    ulong bar_offset, ulong requested_length,
			    struct gasket_mappable_region *mappable_region,
			    ulong *virt_offset)
{
	ulong range_start = region->start;
	ulong range_length = region->length_bytes;
	ulong range_end = range_start + range_length;

	*virt_offset = 0;
	/*
	 * NOTE(review): bar_offset + requested_length (and range_start +
	 * range_length above) can wrap for values near ULONG_MAX — confirm
	 * that all callers validate their offsets/lengths beforehand.
	 */
	if (bar_offset + requested_length < range_start) {
		/*
		 * If the requested region is completely below the range,
		 * there is nothing to map.
		 */
		return false;
	} else if (bar_offset <= range_start) {
		/* If the bar offset is below this range's start
		 * but the requested length continues into it:
		 * 1) Only map starting from the beginning of this
		 *	range's phys. offset, so we don't map unmappable
		 *	memory.
		 * 2) The length of the virtual memory to not map is the
		 *	delta between the bar offset and the
		 *	mappable start (and since the mappable start is
		 *	bigger, start - req.)
		 * 3) The map length is the minimum of the mappable
		 *	requested length (requested_length - virt_offset)
		 *	and the actual mappable length of the range.
		 */
		mappable_region->start = range_start;
		*virt_offset = range_start - bar_offset;
		mappable_region->length_bytes =
			min(requested_length - *virt_offset, range_length);
		return true;
	} else if (bar_offset > range_start &&
		   bar_offset < range_end) {
		/*
		 * If the bar offset is within this range:
		 * 1) Map starting from the bar offset.
		 * 2) Because there is no forbidden memory between the
		 *	bar offset and the range start,
		 *	virt_offset is 0.
		 * 3) The map length is the minimum of the requested
		 *	length and the remaining length in the buffer
		 *	(range_end - bar_offset)
		 */
		mappable_region->start = bar_offset;
		*virt_offset = 0;
		mappable_region->length_bytes =
			min(requested_length, range_end - bar_offset);
		return true;
	}

	/*
	 * If the requested [start] offset is above range_end,
	 * there's nothing to map.
	 */
	return false;
}
836
837 /*
838  * Calculates the offset where the VMA range begins in its containing BAR.
839  * The offset is written into bar_offset on success.
840  * Returns zero on success, anything else on error.
841  */
842 static int gasket_mm_vma_bar_offset(const struct gasket_dev *gasket_dev,
843                                     const struct vm_area_struct *vma,
844                                     ulong *bar_offset)
845 {
846         ulong raw_offset;
847         int bar_index;
848         const struct gasket_driver_desc *driver_desc =
849                 gasket_dev->internal_desc->driver_desc;
850
851         raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
852                 driver_desc->legacy_mmap_address_offset;
853         bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
854         if (bar_index < 0) {
855                 dev_err(gasket_dev->dev,
856                         "Unable to find matching bar for address 0x%lx\n",
857                         raw_offset);
858                 trace_gasket_mmap_exit(bar_index);
859                 return bar_index;
860         }
861         *bar_offset =
862                 raw_offset - driver_desc->bar_descriptions[bar_index].base;
863
864         return 0;
865 }
866
867 int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
868                            struct vm_area_struct *vma,
869                            const struct gasket_mappable_region *map_region)
870 {
871         ulong bar_offset;
872         ulong virt_offset;
873         struct gasket_mappable_region mappable_region;
874         int ret;
875
876         if (map_region->length_bytes == 0)
877                 return 0;
878
879         ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
880         if (ret)
881                 return ret;
882
883         if (!gasket_mm_get_mapping_addrs(map_region, bar_offset,
884                                          vma->vm_end - vma->vm_start,
885                                          &mappable_region, &virt_offset))
886                 return 1;
887
888         /*
889          * The length passed to zap_vma_ptes MUST BE A MULTIPLE OF
890          * PAGE_SIZE! Trust me. I have the scars.
891          *
892          * Next multiple of y: ceil_div(x, y) * y
893          */
894         zap_vma_ptes(vma, vma->vm_start + virt_offset,
895                      DIV_ROUND_UP(mappable_region.length_bytes, PAGE_SIZE) *
896                      PAGE_SIZE);
897         return 0;
898 }
899 EXPORT_SYMBOL(gasket_mm_unmap_region);
900
/* Maps a virtual address + range to a physical offset of a BAR. */
static enum do_map_region_status
do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
              struct gasket_mappable_region *mappable_region)
{
        /* Maximum size of a single call to io_remap_pfn_range. */
        /* I pulled this number out of thin air. */
        const ulong max_chunk_size = 64 * 1024 * 1024;
        ulong chunk_size, mapped_bytes = 0;

        const struct gasket_driver_desc *driver_desc =
                gasket_dev->internal_desc->driver_desc;

        ulong bar_offset, virt_offset;
        struct gasket_mappable_region region_to_map;
        ulong phys_offset, map_length;
        ulong virt_base, phys_base;
        int bar_index, ret;

        /* Locate the VMA's starting offset within its BAR. */
        ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
        if (ret)
                return DO_MAP_REGION_INVALID;

        /* Clip the request to the part overlapping this mappable region. */
        if (!gasket_mm_get_mapping_addrs(mappable_region, bar_offset,
                                         vma->vm_end - vma->vm_start,
                                         &region_to_map, &virt_offset))
                return DO_MAP_REGION_INVALID;
        phys_offset = region_to_map.start;
        map_length = region_to_map.length_bytes;

        virt_base = vma->vm_start + virt_offset;
        bar_index =
                gasket_get_bar_index(gasket_dev,
                                     (vma->vm_pgoff << PAGE_SHIFT) +
                                     driver_desc->legacy_mmap_address_offset);

        if (bar_index < 0)
                return DO_MAP_REGION_INVALID;

        phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
        while (mapped_bytes < map_length) {
                /*
                 * io_remap_pfn_range can take a while, so we chunk its
                 * calls and call cond_resched between each.
                 */
                chunk_size = min(max_chunk_size, map_length - mapped_bytes);

                cond_resched();
                ret = io_remap_pfn_range(vma, virt_base + mapped_bytes,
                                         (phys_base + mapped_bytes) >>
                                         PAGE_SHIFT, chunk_size,
                                         vma->vm_page_prot);
                if (ret) {
                        dev_err(gasket_dev->dev,
                                "Error remapping PFN range.\n");
                        goto fail;
                }
                mapped_bytes += chunk_size;
        }

        return DO_MAP_REGION_SUCCESS;

fail:
        /*
         * Unmap the partial chunk we mapped.  The caller's region is
         * shrunk to the bytes actually mapped so gasket_mm_unmap_region()
         * zaps exactly that much; callers must not rely on length_bytes
         * being preserved on the failure path.
         */
        mappable_region->length_bytes = mapped_bytes;
        if (gasket_mm_unmap_region(gasket_dev, vma, mappable_region))
                dev_err(gasket_dev->dev,
                        "Error unmapping partial region 0x%lx (0x%lx bytes)\n",
                        (ulong)virt_offset,
                        (ulong)mapped_bytes);

        return DO_MAP_REGION_FAILURE;
}
974
975 /* Map a region of coherent memory. */
976 static int gasket_mmap_coherent(struct gasket_dev *gasket_dev,
977                                 struct vm_area_struct *vma)
978 {
979         const struct gasket_driver_desc *driver_desc =
980                 gasket_dev->internal_desc->driver_desc;
981         const ulong requested_length = vma->vm_end - vma->vm_start;
982         int ret;
983         ulong permissions;
984
985         if (requested_length == 0 || requested_length >
986             gasket_dev->coherent_buffer.length_bytes) {
987                 trace_gasket_mmap_exit(-EINVAL);
988                 return -EINVAL;
989         }
990
991         permissions = driver_desc->coherent_buffer_description.permissions;
992         if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
993                 dev_err(gasket_dev->dev, "Permission checking failed.\n");
994                 trace_gasket_mmap_exit(-EPERM);
995                 return -EPERM;
996         }
997
998         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
999
1000         ret = remap_pfn_range(vma, vma->vm_start,
1001                               (gasket_dev->coherent_buffer.phys_base) >>
1002                               PAGE_SHIFT, requested_length, vma->vm_page_prot);
1003         if (ret) {
1004                 dev_err(gasket_dev->dev, "Error remapping PFN range err=%d.\n",
1005                         ret);
1006                 trace_gasket_mmap_exit(ret);
1007                 return ret;
1008         }
1009
1010         /* Record the user virtual to dma_address mapping that was
1011          * created by the kernel.
1012          */
1013         gasket_set_user_virt(gasket_dev, requested_length,
1014                              gasket_dev->coherent_buffer.phys_base,
1015                              vma->vm_start);
1016         return 0;
1017 }
1018
1019 /* Map a device's BARs into user space. */
1020 static int gasket_mmap(struct file *filp, struct vm_area_struct *vma)
1021 {
1022         int i, ret;
1023         int bar_index;
1024         int has_mapped_anything = 0;
1025         ulong permissions;
1026         ulong raw_offset, vma_size;
1027         bool is_coherent_region;
1028         const struct gasket_driver_desc *driver_desc;
1029         struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
1030         const struct gasket_bar_desc *bar_desc;
1031         struct gasket_mappable_region *map_regions = NULL;
1032         int num_map_regions = 0;
1033         enum do_map_region_status map_status;
1034
1035         driver_desc = gasket_dev->internal_desc->driver_desc;
1036
1037         if (vma->vm_start & ~PAGE_MASK) {
1038                 dev_err(gasket_dev->dev,
1039                         "Base address not page-aligned: 0x%lx\n",
1040                         vma->vm_start);
1041                 trace_gasket_mmap_exit(-EINVAL);
1042                 return -EINVAL;
1043         }
1044
1045         /* Calculate the offset of this range into physical mem. */
1046         raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
1047                 driver_desc->legacy_mmap_address_offset;
1048         vma_size = vma->vm_end - vma->vm_start;
1049         trace_gasket_mmap_entry(gasket_dev->dev_info.name, raw_offset,
1050                                 vma_size);
1051
1052         /*
1053          * Check if the raw offset is within a bar region. If not, check if it
1054          * is a coherent region.
1055          */
1056         bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
1057         is_coherent_region = gasket_is_coherent_region(driver_desc, raw_offset);
1058         if (bar_index < 0 && !is_coherent_region) {
1059                 dev_err(gasket_dev->dev,
1060                         "Unable to find matching bar for address 0x%lx\n",
1061                         raw_offset);
1062                 trace_gasket_mmap_exit(bar_index);
1063                 return bar_index;
1064         }
1065         if (bar_index > 0 && is_coherent_region) {
1066                 dev_err(gasket_dev->dev,
1067                         "double matching bar and coherent buffers for address "
1068                         "0x%lx\n",
1069                         raw_offset);
1070                 trace_gasket_mmap_exit(bar_index);
1071                 return -EINVAL;
1072         }
1073
1074         vma->vm_private_data = gasket_dev;
1075
1076         if (is_coherent_region)
1077                 return gasket_mmap_coherent(gasket_dev, vma);
1078
1079         /* Everything in the rest of this function is for normal BAR mapping. */
1080
1081         /*
1082          * Subtract the base of the bar from the raw offset to get the
1083          * memory location within the bar to map.
1084          */
1085         bar_desc = &driver_desc->bar_descriptions[bar_index];
1086         permissions = bar_desc->permissions;
1087         if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
1088                 dev_err(gasket_dev->dev, "Permission checking failed.\n");
1089                 trace_gasket_mmap_exit(-EPERM);
1090                 return -EPERM;
1091         }
1092
1093         if (driver_desc->get_mappable_regions_cb) {
1094                 ret = driver_desc->get_mappable_regions_cb(gasket_dev,
1095                                                            bar_index,
1096                                                            &map_regions,
1097                                                            &num_map_regions);
1098                 if (ret)
1099                         return ret;
1100         } else {
1101                 if (!gasket_mmap_has_permissions(gasket_dev, vma,
1102                                                  bar_desc->permissions)) {
1103                         dev_err(gasket_dev->dev,
1104                                 "Permission checking failed.\n");
1105                         trace_gasket_mmap_exit(-EPERM);
1106                         return -EPERM;
1107                 }
1108                 num_map_regions = bar_desc->num_mappable_regions;
1109                 map_regions = kcalloc(num_map_regions,
1110                                       sizeof(*bar_desc->mappable_regions),
1111                                       GFP_KERNEL);
1112                 if (map_regions) {
1113                         memcpy(map_regions, bar_desc->mappable_regions,
1114                                num_map_regions *
1115                                         sizeof(*bar_desc->mappable_regions));
1116                 }
1117         }
1118
1119         if (!map_regions || num_map_regions == 0) {
1120                 dev_err(gasket_dev->dev, "No mappable regions returned!\n");
1121                 return -EINVAL;
1122         }
1123
1124         /* Marks the VMA's pages as uncacheable. */
1125         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1126         for (i = 0; i < num_map_regions; i++) {
1127                 map_status = do_map_region(gasket_dev, vma, &map_regions[i]);
1128                 /* Try the next region if this one was not mappable. */
1129                 if (map_status == DO_MAP_REGION_INVALID)
1130                         continue;
1131                 if (map_status == DO_MAP_REGION_FAILURE) {
1132                         ret = -ENOMEM;
1133                         goto fail;
1134                 }
1135
1136                 has_mapped_anything = 1;
1137         }
1138
1139         kfree(map_regions);
1140
1141         /* If we could not map any memory, the request was invalid. */
1142         if (!has_mapped_anything) {
1143                 dev_err(gasket_dev->dev,
1144                         "Map request did not contain a valid region.\n");
1145                 trace_gasket_mmap_exit(-EINVAL);
1146                 return -EINVAL;
1147         }
1148
1149         trace_gasket_mmap_exit(0);
1150         return 0;
1151
1152 fail:
1153         /* Need to unmap any mapped ranges. */
1154         num_map_regions = i;
1155         for (i = 0; i < num_map_regions; i++)
1156                 if (gasket_mm_unmap_region(gasket_dev, vma,
1157                                            &bar_desc->mappable_regions[i]))
1158                         dev_err(gasket_dev->dev, "Error unmapping range %d.\n",
1159                                 i);
1160         kfree(map_regions);
1161
1162         return ret;
1163 }
1164
1165 /*
1166  * Open the char device file.
1167  *
1168  * If the open is for writing, and the device is not owned, this process becomes
1169  * the owner.  If the open is for writing and the device is already owned by
1170  * some other process, it is an error.  If this process is the owner, increment
1171  * the open count.
1172  *
1173  * Returns 0 if successful, a negative error number otherwise.
1174  */
static int gasket_open(struct inode *inode, struct file *filp)
{
        int ret;
        struct gasket_dev *gasket_dev;
        const struct gasket_driver_desc *driver_desc;
        struct gasket_ownership *ownership;
        char task_name[TASK_COMM_LEN];
        /* Recover the per-device bookkeeping from the char device node. */
        struct gasket_cdev_info *dev_info =
            container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
        struct pid_namespace *pid_ns = task_active_pid_ns(current);
        /* CAP_SYS_ADMIN in the caller's user namespace bypasses ownership. */
        bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);

        gasket_dev = dev_info->gasket_dev_ptr;
        driver_desc = gasket_dev->internal_desc->driver_desc;
        ownership = &dev_info->ownership;
        get_task_comm(task_name, current);
        filp->private_data = gasket_dev;
        inode->i_size = 0;

        dev_dbg(gasket_dev->dev,
                "Attempting to open with tgid %u (%s) (f_mode: 0%03o, "
                "fmode_write: %d is_root: %u)\n",
                current->tgid, task_name, filp->f_mode,
                (filp->f_mode & FMODE_WRITE), is_root);

        /* Always allow non-writing accesses. */
        if (!(filp->f_mode & FMODE_WRITE)) {
                dev_dbg(gasket_dev->dev, "Allowing read-only opening.\n");
                return 0;
        }

        /* Ownership state is protected by the per-device mutex. */
        mutex_lock(&gasket_dev->mutex);

        dev_dbg(gasket_dev->dev,
                "Current owner open count (owning tgid %u): %d.\n",
                ownership->owner, ownership->write_open_count);

        /* Opening a node owned by another TGID is an error (unless root) */
        if (ownership->is_owned && ownership->owner != current->tgid &&
            !is_root) {
                dev_err(gasket_dev->dev,
                        "Process %u is opening a node held by %u.\n",
                        current->tgid, ownership->owner);
                mutex_unlock(&gasket_dev->mutex);
                return -EPERM;
        }

        /* If the node is not owned, assign it to the current TGID. */
        if (!ownership->is_owned) {
                /* First writer: let the device driver prepare the device. */
                ret = gasket_check_and_invoke_callback_nolock(gasket_dev,
                                                              driver_desc->device_open_cb);
                if (ret) {
                        dev_err(gasket_dev->dev,
                                "Error in device open cb: %d\n", ret);
                        mutex_unlock(&gasket_dev->mutex);
                        return ret;
                }
                ownership->is_owned = 1;
                ownership->owner = current->tgid;
                dev_dbg(gasket_dev->dev, "Device owner is now tgid %u\n",
                        ownership->owner);
        }

        /* Count this writer against the owner's open count. */
        ownership->write_open_count++;

        dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
                ownership->owner, ownership->write_open_count);

        mutex_unlock(&gasket_dev->mutex);
        return 0;
}
1246
1247 /*
1248  * Called on a close of the device file.  If this process is the owner,
1249  * decrement the open count.  On last close by the owner, free up buffers and
1250  * eventfd contexts, and release ownership.
1251  *
1252  * Returns 0 if successful, a negative error number otherwise.
1253  */
1254 static int gasket_release(struct inode *inode, struct file *file)
1255 {
1256         int i;
1257         struct gasket_dev *gasket_dev;
1258         struct gasket_ownership *ownership;
1259         const struct gasket_driver_desc *driver_desc;
1260         char task_name[TASK_COMM_LEN];
1261         struct gasket_cdev_info *dev_info =
1262                 container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
1263         struct pid_namespace *pid_ns = task_active_pid_ns(current);
1264         bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
1265
1266         gasket_dev = dev_info->gasket_dev_ptr;
1267         driver_desc = gasket_dev->internal_desc->driver_desc;
1268         ownership = &dev_info->ownership;
1269         get_task_comm(task_name, current);
1270         mutex_lock(&gasket_dev->mutex);
1271
1272         dev_dbg(gasket_dev->dev,
1273                 "Releasing device node. Call origin: tgid %u (%s) "
1274                 "(f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
1275                 current->tgid, task_name, file->f_mode,
1276                 (file->f_mode & FMODE_WRITE), is_root);
1277         dev_dbg(gasket_dev->dev, "Current open count (owning tgid %u): %d\n",
1278                 ownership->owner, ownership->write_open_count);
1279
1280         if (file->f_mode & FMODE_WRITE) {
1281                 ownership->write_open_count--;
1282                 if (ownership->write_open_count == 0) {
1283                         dev_dbg(gasket_dev->dev, "Device is now free\n");
1284                         ownership->is_owned = 0;
1285                         ownership->owner = 0;
1286
1287                         /* Forces chip reset before we unmap the page tables. */
1288                         driver_desc->device_reset_cb(gasket_dev);
1289
1290                         for (i = 0; i < driver_desc->num_page_tables; ++i) {
1291                                 gasket_page_table_unmap_all(gasket_dev->page_table[i]);
1292                                 gasket_page_table_garbage_collect(gasket_dev->page_table[i]);
1293                                 gasket_free_coherent_memory_all(gasket_dev, i);
1294                         }
1295
1296                         /* Closes device, enters power save. */
1297                         gasket_check_and_invoke_callback_nolock(gasket_dev,
1298                                                                 driver_desc->device_close_cb);
1299                 }
1300         }
1301
1302         dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
1303                 ownership->owner, ownership->write_open_count);
1304         mutex_unlock(&gasket_dev->mutex);
1305         return 0;
1306 }
1307
1308 /*
1309  * Gasket ioctl dispatch function.
1310  *
1311  * Check if the ioctl is a generic ioctl. If not, pass the ioctl to the
1312  * ioctl_handler_cb registered in the driver description.
1313  * If the ioctl is a generic ioctl, pass it to gasket_ioctl_handler.
1314  */
1315 static long gasket_ioctl(struct file *filp, uint cmd, ulong arg)
1316 {
1317         struct gasket_dev *gasket_dev;
1318         const struct gasket_driver_desc *driver_desc;
1319         void __user *argp = (void __user *)arg;
1320         char path[256];
1321
1322         gasket_dev = (struct gasket_dev *)filp->private_data;
1323         driver_desc = gasket_dev->internal_desc->driver_desc;
1324         if (!driver_desc) {
1325                 dev_dbg(gasket_dev->dev,
1326                         "Unable to find device descriptor for file %s\n",
1327                         d_path(&filp->f_path, path, 256));
1328                 return -ENODEV;
1329         }
1330
1331         if (!gasket_is_supported_ioctl(cmd)) {
1332                 /*
1333                  * The ioctl handler is not a standard Gasket callback, since
1334                  * it requires different arguments. This means we can't use
1335                  * check_and_invoke_callback.
1336                  */
1337                 if (driver_desc->ioctl_handler_cb)
1338                         return driver_desc->ioctl_handler_cb(filp, cmd, argp);
1339
1340                 dev_dbg(gasket_dev->dev, "Received unknown ioctl 0x%x\n", cmd);
1341                 return -EINVAL;
1342         }
1343
1344         return gasket_handle_ioctl(filp, cmd, argp);
1345 }
1346
/*
 * File operations for all Gasket devices; installed on the char device by
 * gasket_enable_device() via gasket_add_cdev().
 */
static const struct file_operations gasket_file_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,    /* seeking on the device node is meaningless */
        .mmap = gasket_mmap,
        .open = gasket_open,
        .release = gasket_release,
        .unlocked_ioctl = gasket_ioctl,
};
1356
/* Perform final init and marks the device as active. */
int gasket_enable_device(struct gasket_dev *gasket_dev)
{
        int tbl_idx;
        int ret;
        const struct gasket_driver_desc *driver_desc =
                gasket_dev->internal_desc->driver_desc;

        /* Set up interrupt handling per the driver description. */
        ret = gasket_interrupt_init(gasket_dev, driver_desc->name,
                                    driver_desc->interrupt_type,
                                    driver_desc->interrupts,
                                    driver_desc->num_interrupts,
                                    driver_desc->interrupt_pack_width,
                                    driver_desc->interrupt_bar_index,
                                    driver_desc->wire_interrupt_offsets);
        if (ret) {
                dev_err(gasket_dev->dev,
                        "Critical failure to allocate interrupts: %d\n", ret);
                gasket_interrupt_cleanup(gasket_dev);
                return ret;
        }

        /* Initialize each page table described by the driver. */
        for (tbl_idx = 0; tbl_idx < driver_desc->num_page_tables; tbl_idx++) {
                dev_dbg(gasket_dev->dev, "Initializing page table %d.\n",
                        tbl_idx);
                ret = gasket_page_table_init(&gasket_dev->page_table[tbl_idx],
                                             &gasket_dev->bar_data[driver_desc->page_table_bar_index],
                                             &driver_desc->page_table_configs[tbl_idx],
                                             gasket_dev->dev,
                                             gasket_dev->pci_dev);
                if (ret) {
                        dev_err(gasket_dev->dev,
                                "Couldn't init page table %d: %d\n",
                                tbl_idx, ret);
                        return ret;
                }
                /*
                 * Make sure that the page table is clear and set to simple
                 * addresses.
                 */
                gasket_page_table_reset(gasket_dev->page_table[tbl_idx]);
        }

        /*
         * hardware_revision_cb returns a positive integer (the rev) if
         * successful.
         */
        ret = check_and_invoke_callback(gasket_dev,
                                        driver_desc->hardware_revision_cb);
        if (ret < 0) {
                dev_err(gasket_dev->dev,
                        "Error getting hardware revision: %d\n", ret);
                return ret;
        }
        gasket_dev->hardware_revision = ret;

        /* device_status_cb returns a device status, not an error code. */
        gasket_dev->status = gasket_get_hw_status(gasket_dev);
        if (gasket_dev->status == GASKET_STATUS_DEAD)
                dev_err(gasket_dev->dev, "Device reported as unhealthy.\n");

        /* Expose the char device node; the device is now usable. */
        ret = gasket_add_cdev(&gasket_dev->dev_info, &gasket_file_ops,
                              driver_desc->module);
        if (ret)
                return ret;

        return 0;
}
EXPORT_SYMBOL(gasket_enable_device);
1426
1427 /*
1428  * Add PCI gasket device.
1429  *
1430  * Called by Gasket device probe function.
1431  * Allocates device metadata and maps device memory.  The device driver must
1432  * call gasket_enable_device after driver init is complete to place the device
1433  * in active use.
1434  */
int gasket_pci_add_device(struct pci_dev *pci_dev,
                          struct gasket_dev **gasket_devp)
{
        int ret;
        const char *kobj_name = dev_name(&pci_dev->dev);
        struct gasket_internal_desc *internal_desc;
        struct gasket_dev *gasket_dev;
        const struct gasket_driver_desc *driver_desc;
        struct device *parent;

        pr_debug("add PCI device %s\n", kobj_name);

        /* Find the registered driver-type descriptor for this PCI device. */
        mutex_lock(&g_mutex);
        internal_desc = lookup_internal_desc(pci_dev);
        mutex_unlock(&g_mutex);
        if (!internal_desc) {
                dev_err(&pci_dev->dev,
                        "PCI add device called for unknown driver type\n");
                return -ENODEV;
        }

        driver_desc = internal_desc->driver_desc;

        parent = &pci_dev->dev;
        /*
         * Allocate per-device state; dev_info.device is checked below, so
         * this apparently also creates the kernel device node.
         */
        ret = gasket_alloc_dev(internal_desc, parent, &gasket_dev, kobj_name);
        if (ret)
                return ret;
        gasket_dev->pci_dev = pci_dev;
        if (IS_ERR_OR_NULL(gasket_dev->dev_info.device)) {
                pr_err("Cannot create %s device %s [ret = %ld]\n",
                       driver_desc->name, gasket_dev->dev_info.name,
                       PTR_ERR(gasket_dev->dev_info.device));
                ret = -ENODEV;
                goto fail1;
        }

        /* Enable the PCI device and map its BARs. */
        ret = gasket_setup_pci(pci_dev, gasket_dev);
        if (ret)
                goto fail2;

        /* Set up the sysfs mapping for this device's attributes. */
        ret = gasket_sysfs_create_mapping(gasket_dev->dev_info.device,
                                          gasket_dev);
        if (ret)
                goto fail3;

        /*
         * Once we've created the mapping structures successfully, attempt to
         * create a symlink to the pci directory of this object.
         */
        ret = sysfs_create_link(&gasket_dev->dev_info.device->kobj,
                                &pci_dev->dev.kobj, dev_name(&pci_dev->dev));
        if (ret) {
                dev_err(gasket_dev->dev,
                        "Cannot create sysfs pci link: %d\n", ret);
                goto fail3;
        }
        ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
                                          gasket_sysfs_generic_attrs);
        if (ret)
                goto fail4;

        *gasket_devp = gasket_dev;
        return 0;

        /* Unwind in reverse order; each label falls through to the next. */
fail4:
fail3:
        gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
fail2:
        gasket_cleanup_pci(gasket_dev);
        device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
fail1:
        gasket_free_dev(gasket_dev);
        return ret;
}
EXPORT_SYMBOL(gasket_pci_add_device);
1510
1511 /* Remove a PCI gasket device. */
1512 void gasket_pci_remove_device(struct pci_dev *pci_dev)
1513 {
1514         int i;
1515         struct gasket_internal_desc *internal_desc;
1516         struct gasket_dev *gasket_dev = NULL;
1517         const struct gasket_driver_desc *driver_desc;
1518         /* Find the device desc. */
1519         mutex_lock(&g_mutex);
1520         internal_desc = lookup_internal_desc(pci_dev);
1521         if (!internal_desc) {
1522                 mutex_unlock(&g_mutex);
1523                 return;
1524         }
1525         mutex_unlock(&g_mutex);
1526
1527         driver_desc = internal_desc->driver_desc;
1528
1529         /* Now find the specific device */
1530         mutex_lock(&internal_desc->mutex);
1531         for (i = 0; i < GASKET_DEV_MAX; i++) {
1532                 if (internal_desc->devs[i] &&
1533                     internal_desc->devs[i]->pci_dev == pci_dev) {
1534                         gasket_dev = internal_desc->devs[i];
1535                         break;
1536                 }
1537         }
1538         mutex_unlock(&internal_desc->mutex);
1539
1540         if (!gasket_dev)
1541                 return;
1542
1543         dev_dbg(gasket_dev->dev, "remove %s PCI gasket device\n",
1544                 internal_desc->driver_desc->name);
1545
1546         gasket_cleanup_pci(gasket_dev);
1547
1548         gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
1549         device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
1550         gasket_free_dev(gasket_dev);
1551 }
1552 EXPORT_SYMBOL(gasket_pci_remove_device);
1553
1554 /**
1555  * Lookup a name by number in a num_name table.
1556  * @num: Number to lookup.
1557  * @table: Array of num_name structures, the table for the lookup.
1558  *
1559  * Description: Searches for num in the table.  If found, the
1560  *              corresponding name is returned; otherwise NULL
1561  *              is returned.
1562  *
1563  *              The table must have a NULL name pointer at the end.
1564  */
1565 const char *gasket_num_name_lookup(uint num,
1566                                    const struct gasket_num_name *table)
1567 {
1568         uint i = 0;
1569
1570         while (table[i].snn_name) {
1571                 if (num == table[i].snn_num)
1572                         break;
1573                 ++i;
1574         }
1575
1576         return table[i].snn_name;
1577 }
1578 EXPORT_SYMBOL(gasket_num_name_lookup);
1579
1580 int gasket_reset(struct gasket_dev *gasket_dev)
1581 {
1582         int ret;
1583
1584         mutex_lock(&gasket_dev->mutex);
1585         ret = gasket_reset_nolock(gasket_dev);
1586         mutex_unlock(&gasket_dev->mutex);
1587         return ret;
1588 }
1589 EXPORT_SYMBOL(gasket_reset);
1590
1591 int gasket_reset_nolock(struct gasket_dev *gasket_dev)
1592 {
1593         int ret;
1594         int i;
1595         const struct gasket_driver_desc *driver_desc;
1596
1597         driver_desc = gasket_dev->internal_desc->driver_desc;
1598         if (!driver_desc->device_reset_cb)
1599                 return 0;
1600
1601         ret = driver_desc->device_reset_cb(gasket_dev);
1602         if (ret) {
1603                 dev_dbg(gasket_dev->dev, "Device reset cb returned %d.\n",
1604                         ret);
1605                 return ret;
1606         }
1607
1608         /* Reinitialize the page tables and interrupt framework. */
1609         for (i = 0; i < driver_desc->num_page_tables; ++i)
1610                 gasket_page_table_reset(gasket_dev->page_table[i]);
1611
1612         ret = gasket_interrupt_reinit(gasket_dev);
1613         if (ret) {
1614                 dev_dbg(gasket_dev->dev, "Unable to reinit interrupts: %d.\n",
1615                         ret);
1616                 return ret;
1617         }
1618
1619         /* Get current device health. */
1620         gasket_dev->status = gasket_get_hw_status(gasket_dev);
1621         if (gasket_dev->status == GASKET_STATUS_DEAD) {
1622                 dev_dbg(gasket_dev->dev, "Device reported as dead.\n");
1623                 return -EINVAL;
1624         }
1625
1626         return 0;
1627 }
1628 EXPORT_SYMBOL(gasket_reset_nolock);
1629
1630 gasket_ioctl_permissions_cb_t
1631 gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev)
1632 {
1633         return gasket_dev->internal_desc->driver_desc->ioctl_permissions_cb;
1634 }
1635 EXPORT_SYMBOL(gasket_get_ioctl_permissions_cb);
1636
1637 /* Get the driver structure for a given gasket_dev.
1638  * @dev: pointer to gasket_dev, implementing the requested driver.
1639  */
1640 const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev)
1641 {
1642         return dev->internal_desc->driver_desc;
1643 }
1644
1645 /* Get the device structure for a given gasket_dev.
1646  * @dev: pointer to gasket_dev, implementing the requested driver.
1647  */
1648 struct device *gasket_get_device(struct gasket_dev *dev)
1649 {
1650         return dev->dev;
1651 }
1652
/**
 * Wait for a device register to take on a given masked value.
 * @gasket_dev: Device struct.
 * @bar: Bar
 * @offset: Register offset
 * @mask: Register mask
 * @val: Expected value
 * @max_retries: Maximum number of read/sleep iterations
 * @delay_ms: Sleep duration between retries, in milliseconds
 *
 * Description: Repeatedly reads the register, sleeping between reads, until
 * the masked register value equals @val or @max_retries is exhausted.
 **/
1666 int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
1667                                 u64 offset, u64 mask, u64 val,
1668                                 uint max_retries, u64 delay_ms)
1669 {
1670         uint retries = 0;
1671         u64 tmp;
1672
1673         while (retries < max_retries) {
1674                 tmp = gasket_dev_read_64(gasket_dev, bar, offset);
1675                 if ((tmp & mask) == val)
1676                         return 0;
1677                 msleep(delay_ms);
1678                 retries++;
1679         }
1680         dev_dbg(gasket_dev->dev, "%s timeout: reg %llx timeout (%llu ms)\n",
1681                 __func__, offset, max_retries * delay_ms);
1682         return -ETIMEDOUT;
1683 }
1684 EXPORT_SYMBOL(gasket_wait_with_reschedule);
1685
1686 /* See gasket_core.h for description. */
1687 int gasket_register_device(const struct gasket_driver_desc *driver_desc)
1688 {
1689         int i, ret;
1690         int desc_idx = -1;
1691         struct gasket_internal_desc *internal;
1692
1693         pr_debug("Loading %s driver version %s\n", driver_desc->name,
1694                  driver_desc->driver_version);
1695         /* Check for duplicates and find a free slot. */
1696         mutex_lock(&g_mutex);
1697
1698         for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
1699                 if (g_descs[i].driver_desc == driver_desc) {
1700                         pr_err("%s driver already loaded/registered\n",
1701                                driver_desc->name);
1702                         mutex_unlock(&g_mutex);
1703                         return -EBUSY;
1704                 }
1705         }
1706
1707         /* This and the above loop could be combined, but this reads easier. */
1708         for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
1709                 if (!g_descs[i].driver_desc) {
1710                         g_descs[i].driver_desc = driver_desc;
1711                         desc_idx = i;
1712                         break;
1713                 }
1714         }
1715         mutex_unlock(&g_mutex);
1716
1717         if (desc_idx == -1) {
1718                 pr_err("too many drivers loaded, max %d\n",
1719                        GASKET_FRAMEWORK_DESC_MAX);
1720                 return -EBUSY;
1721         }
1722
1723         internal = &g_descs[desc_idx];
1724         mutex_init(&internal->mutex);
1725         memset(internal->devs, 0, sizeof(struct gasket_dev *) * GASKET_DEV_MAX);
1726         internal->class =
1727                 class_create(driver_desc->module, driver_desc->name);
1728
1729         if (IS_ERR(internal->class)) {
1730                 pr_err("Cannot register %s class [ret=%ld]\n",
1731                        driver_desc->name, PTR_ERR(internal->class));
1732                 ret = PTR_ERR(internal->class);
1733                 goto unregister_gasket_driver;
1734         }
1735
1736         ret = register_chrdev_region(MKDEV(driver_desc->major,
1737                                            driver_desc->minor), GASKET_DEV_MAX,
1738                                      driver_desc->name);
1739         if (ret) {
1740                 pr_err("cannot register %s char driver [ret=%d]\n",
1741                        driver_desc->name, ret);
1742                 goto destroy_class;
1743         }
1744
1745         return 0;
1746
1747 destroy_class:
1748         class_destroy(internal->class);
1749
1750 unregister_gasket_driver:
1751         mutex_lock(&g_mutex);
1752         g_descs[desc_idx].driver_desc = NULL;
1753         mutex_unlock(&g_mutex);
1754         return ret;
1755 }
1756 EXPORT_SYMBOL(gasket_register_device);
1757
1758 /* See gasket_core.h for description. */
1759 void gasket_unregister_device(const struct gasket_driver_desc *driver_desc)
1760 {
1761         int i, desc_idx;
1762         struct gasket_internal_desc *internal_desc = NULL;
1763
1764         mutex_lock(&g_mutex);
1765         for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
1766                 if (g_descs[i].driver_desc == driver_desc) {
1767                         internal_desc = &g_descs[i];
1768                         desc_idx = i;
1769                         break;
1770                 }
1771         }
1772
1773         if (!internal_desc) {
1774                 mutex_unlock(&g_mutex);
1775                 pr_err("request to unregister unknown desc: %s, %d:%d\n",
1776                        driver_desc->name, driver_desc->major,
1777                        driver_desc->minor);
1778                 return;
1779         }
1780
1781         unregister_chrdev_region(MKDEV(driver_desc->major, driver_desc->minor),
1782                                  GASKET_DEV_MAX);
1783
1784         class_destroy(internal_desc->class);
1785
1786         /* Finally, effectively "remove" the driver. */
1787         g_descs[desc_idx].driver_desc = NULL;
1788         mutex_unlock(&g_mutex);
1789
1790         pr_debug("removed %s driver\n", driver_desc->name);
1791 }
1792 EXPORT_SYMBOL(gasket_unregister_device);
1793
1794 static int __init gasket_init(void)
1795 {
1796         int i;
1797
1798         pr_debug("%s\n", __func__);
1799         mutex_lock(&g_mutex);
1800         for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
1801                 g_descs[i].driver_desc = NULL;
1802                 mutex_init(&g_descs[i].mutex);
1803         }
1804
1805         gasket_sysfs_init();
1806
1807         mutex_unlock(&g_mutex);
1808         return 0;
1809 }
1810
/* Module exit: nothing to tear down here beyond logging the unload. */
static void __exit gasket_exit(void)
{
	pr_debug("%s\n", __func__);
}
1815 MODULE_DESCRIPTION("Google Gasket driver framework");
1816 MODULE_VERSION(GASKET_FRAMEWORK_VERSION);
1817 MODULE_LICENSE("GPL v2");
1818 MODULE_AUTHOR("Rob Springer <rspringer@google.com>");
1819 module_init(gasket_init);
1820 module_exit(gasket_exit);