/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
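/*
 * For example (illustrative IDs):
 *   modprobe vfio-pci ids=8086:10d3,1102:0002:1102:0040
 * binds an 8086:10d3 device, plus a 1102:0002 device with subsystem
 * 1102:0040, to vfio-pci at module load.
 */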

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		 "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static DEFINE_MUTEX(driver_lock);

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge level.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);

static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	pci_set_power_state(pdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	vdev->reset_works = (pci_reset_function(pdev) == 0);
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	if (likely(!nointxmask))
		vdev->pci_2_3 = pci_intx_mask_supported(pdev);

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}
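
	/*
	 * Cache the MSI-X table location below so the mmap path can refuse
	 * mappings that overlap the table; see vfio_pci_mmap().
	 */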
	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	return 0;
}

static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	vdev->virq_disabled = false;

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function().
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to get the locks ourselves to prevent a deadlock.  The
	 * success of this is dependent on being able to lock the device,
	 * which is not always possible.  We cannot use the "try" reset
	 * interface here, since it would overwrite the previously
	 * restored configuration information.
	 */
	if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
		if (device_trylock(&pdev->dev)) {
			if (!__pci_reset_function_locked(pdev))
				vdev->needs_reset = false;
			device_unlock(&pdev->dev);
		}
		pci_cfg_access_unlock(pdev);
	}

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D3hot);
}

static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);

		mutex_lock(&vdev->igate);
		if (vdev->err_trigger) {
			eventfd_ctx_put(vdev->err_trigger);
			vdev->err_trigger = NULL;
		}
		mutex_unlock(&vdev->igate);

		mutex_lock(&vdev->igate);
		if (vdev->req_trigger) {
			eventfd_ctx_put(vdev->req_trigger);
			vdev->req_trigger = NULL;
		}
		mutex_unlock(&vdev->igate);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);
	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;
		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&driver_lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}

static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
		if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && pin)
			return 1;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);
			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}
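
/*
 * Encoding note: PCI_MSI_FLAGS_QMASK holds the log2 of the supported MSI
 * vector count (an encoding of 3 means 1 << 3 == 8 vectors), while the
 * MSI-X QSIZE field stores the table size minus one.
 */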

static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}
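
/*
 * Note: pci_walk_bus() visits every device at or below pdev->bus; when
 * only a slot reset is possible, the wrapper above filters the walk down
 * to devices in the slot, so callers see exactly the set of devices a
 * reset would affect.
 */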

static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;
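
	/*
	 * Every command below follows the VFIO argsz protocol: userspace
	 * reports its buffer size in argsz and we copy only the fields we
	 * know about (minsz), which lets the uapi structs grow compatibly.
	 */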
	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (IS_ENABLED(CONFIG_VFIO_PCI_MMAP) &&
			    pci_resource_flags(pdev, info.index) &
			    IORESOURCE_MEM && info.size >= PAGE_SIZE)
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
			break;
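			/*
			 * Sub-page BARs are deliberately not advertised as
			 * mmap-capable: vfio_pci_mmap() rejects any region
			 * smaller than PAGE_SIZE, so userspace must fall
			 * back to read/write for those.
			 */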
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;
			u16 orig_cmd;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size)
				break;

			/*
			 * Is it really there?  Enable memory decode for
			 * implicit access in pci_map_rom().
			 */
			pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
			pci_write_config_word(pdev, PCI_COMMAND,
					      orig_cmd | PCI_COMMAND_MEMORY);

			io = pci_map_rom(pdev, &size);
			if (io) {
				info.flags = VFIO_REGION_INFO_FLAG_READ;
				pci_unmap_rom(pdev, io);
			} else {
				info.size = 0;
			}

			pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		default:
			return -EINVAL;
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
			/* fall through to return error */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t size;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
		    hdr.count >= (U32_MAX - hdr.start) ||
		    hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		max = vfio_pci_get_irq_count(vdev, hdr.index);
		if (hdr.start >= max || hdr.start + hdr.count > max)
			return -EINVAL;

		switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
		case VFIO_IRQ_SET_DATA_NONE:
			size = 0;
			break;
		case VFIO_IRQ_SET_DATA_BOOL:
			size = sizeof(uint8_t);
			break;
		case VFIO_IRQ_SET_DATA_EVENTFD:
			size = sizeof(int32_t);
			break;
		default:
			return -EINVAL;
		}
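
		/*
		 * For example, VFIO_IRQ_SET_DATA_EVENTFD with count == 3
		 * means three int32_t eventfds follow the header; per the
		 * VFIO uapi, an fd of -1 tears the trigger down.
		 */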
		if (size) {
			if (hdr.argsz - minsz < hdr.count * size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz),
					   hdr.count * size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);

		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);

			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
				     pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	}

	return -ENOTTY;
}

static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	}

	return -EINVAL;
}
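
/*
 * Region offsets encode the region index in the high bits of the file
 * offset (offset = index << VFIO_PCI_OFFSET_SHIFT, with the shift defined
 * as 40 in vfio_pci_private.h), so each region gets a fixed 1TB window and
 * VFIO_PCI_OFFSET_TO_INDEX() above simply recovers the index from *ppos.
 */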

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!(pci_resource_flags(pdev, index) & IORESOURCE_MEM))
		return -EINVAL;

	phys_len = pci_resource_len(pdev, index);
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (phys_len < PAGE_SIZE || req_start + req_len > phys_len)
		return -EINVAL;

	if (index == vdev->msix_bar) {
		/*
		 * Disallow mmaps overlapping the MSI-X table; users don't
		 * get to touch this directly.  We could find somewhere
		 * else to map the overlap, but page granularity is only
		 * a recommendation, not a requirement, so the user needs
		 * to know which bits are real.  Requiring them to mmap
		 * around the table makes that clear.
		 */

		/* If neither entirely above nor below, then it overlaps */
		if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
		      req_start + req_len <= vdev->msix_offset))
			return -EINVAL;
	}
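
	/*
	 * Worked example (illustrative values): with msix_offset == 0x2000
	 * and msix_size == 0x100, a mapping of the MSI-X BAR must end at or
	 * before offset 0x2000 or begin at or after 0x2100.
	 */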

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}
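
/*
 * Note: vm_pgoff is rewritten above to the physical pfn of the BAR plus
 * the requested page offset before remap_pfn_range(), so the region-index
 * encoding in the original mmap offset is consumed at this point.
 */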

static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(&vdev->pdev->dev,
					       "Relaying device request to user (#%u)\n",
					       count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		dev_warn(&vdev->pdev->dev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}

static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};

static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		iommu_group_put(group);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		iommu_group_put(group);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value
		 * at bootup and after being removed from a driver.  The
		 * only transition it allows from this unknown state is to
		 * D0, which typically happens when a driver calls
		 * pci_enable_device().  We're not ready to enable the
		 * device yet, but we do want to be able to get to D3.
		 * Therefore first do a D0 transition before going to D3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	iommu_group_put(pdev->dev.iommu_group);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D0);
}

static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (!vdev) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};

struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}

/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset
 * for this device that are needs_reset and all of the affected devices are
 * unused (!refcnt).  Callers are required to hold driver_lock when calling
 * this to prevent device opens and concurrent bus reset attempts.  We
 * prevent device unbinds by acquiring and holding a reference to the
 * vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool needs_reset = false, slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_devs, &devs, slot))
		goto put_devs;

	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset)
			needs_reset = true;
		if (tmp->refcnt)
			goto put_devs;
	}

	if (needs_reset)
		ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
			     pci_try_reset_bus(vdev->pdev->bus);

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (!ret) {
			tmp->needs_reset = false;

			if (!tmp->refcnt && !disable_idle_d3)
				pci_set_power_state(tmp->pdev, PCI_D3hot);
		}
		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}

static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}
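
/*
 * Parsing note: "ids=8086:10d3" yields fields == 2 (vendor/device only),
 * while "1102:0002:1102:0040" also pins the subsystem IDs (values are
 * illustrative).  class and class_mask default to 0, which pci_add_dynid()
 * treats as matching any class.
 */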

static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);