/*
 * VFIO PCI I/O Port & MMIO access
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
17 #include <linux/pci.h>
18 #include <linux/uaccess.h>
20 #include <linux/vfio.h>
21 #include <linux/vgaarb.h>
23 #include "vfio_pci_private.h"
/*
 * Device registers are little-endian.  The io{read,write} accessors below
 * byteswap as needed so that vfio_io{read,write}N always performs a
 * little-endian device access regardless of host endianness.  Without the
 * #else/#endif the big-endian set would unconditionally redefine the
 * little-endian one.
 */
#ifdef __LITTLE_ENDIAN
#define vfio_ioread64	ioread64
#define vfio_iowrite64	iowrite64
#define vfio_ioread32	ioread32
#define vfio_iowrite32	iowrite32
#define vfio_ioread16	ioread16
#define vfio_iowrite16	iowrite16
#else
#define vfio_ioread64	ioread64be
#define vfio_iowrite64	iowrite64be
#define vfio_ioread32	ioread32be
#define vfio_iowrite32	iowrite32be
#define vfio_ioread16	ioread16be
#define vfio_iowrite16	iowrite16be
#endif
/* Single-byte accesses have no endianness. */
#define vfio_ioread8	ioread8
#define vfio_iowrite8	iowrite8
44 * Read or write from an __iomem region (MMIO or I/O port) with an excluded
45 * range which is inaccessible. The excluded range drops writes and fills
46 * reads with -1. This is intended for handling MSI-X vector tables and
47 * leftover space for ROM BARs.
49 static ssize_t do_io_rw(void __iomem *io, char __user *buf,
50 loff_t off, size_t count, size_t x_start,
51 size_t x_end, bool iswrite)
56 size_t fillable, filled;
59 fillable = min(count, (size_t)(x_start - off));
60 else if (off >= x_end)
65 if (fillable >= 4 && !(off % 4)) {
69 if (copy_from_user(&val, buf, 4))
72 vfio_iowrite32(val, io + off);
74 val = vfio_ioread32(io + off);
76 if (copy_to_user(buf, &val, 4))
81 } else if (fillable >= 2 && !(off % 2)) {
85 if (copy_from_user(&val, buf, 2))
88 vfio_iowrite16(val, io + off);
90 val = vfio_ioread16(io + off);
92 if (copy_to_user(buf, &val, 2))
97 } else if (fillable) {
101 if (copy_from_user(&val, buf, 1))
104 vfio_iowrite8(val, io + off);
106 val = vfio_ioread8(io + off);
108 if (copy_to_user(buf, &val, 1))
114 /* Fill reads with -1, drop writes */
115 filled = min(count, (size_t)(x_end - off));
120 for (i = 0; i < filled; i++)
121 if (copy_to_user(buf + i, &val, 1))
135 static int vfio_pci_setup_barmap(struct vfio_pci_device *vdev, int bar)
137 struct pci_dev *pdev = vdev->pdev;
141 if (vdev->barmap[bar])
144 ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
148 io = pci_iomap(pdev, bar, 0);
150 pci_release_selected_regions(pdev, 1 << bar);
154 vdev->barmap[bar] = io;
159 ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
160 size_t count, loff_t *ppos, bool iswrite)
162 struct pci_dev *pdev = vdev->pdev;
163 loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
164 int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
165 size_t x_start = 0, x_end = 0;
168 struct resource *res = &vdev->pdev->resource[bar];
171 if (pci_resource_start(pdev, bar))
172 end = pci_resource_len(pdev, bar);
173 else if (bar == PCI_ROM_RESOURCE &&
174 pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW)
182 count = min(count, (size_t)(end - pos));
184 if (res->flags & IORESOURCE_MEM) {
185 down_read(&vdev->memory_lock);
186 if (!__vfio_pci_memory_enabled(vdev)) {
187 up_read(&vdev->memory_lock);
192 if (bar == PCI_ROM_RESOURCE) {
194 * The ROM can fill less space than the BAR, so we start the
195 * excluded range at the end of the actual ROM. This makes
196 * filling large ROM BARs much faster.
198 io = pci_map_rom(pdev, &x_start);
205 int ret = vfio_pci_setup_barmap(vdev, bar);
211 io = vdev->barmap[bar];
214 if (bar == vdev->msix_bar) {
215 x_start = vdev->msix_offset;
216 x_end = vdev->msix_offset + vdev->msix_size;
219 done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
224 if (bar == PCI_ROM_RESOURCE)
225 pci_unmap_rom(pdev, io);
227 if (res->flags & IORESOURCE_MEM)
228 up_read(&vdev->memory_lock);
233 ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
234 size_t count, loff_t *ppos, bool iswrite)
237 loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK;
238 void __iomem *iomem = NULL;
250 case 0xa0000 ... 0xbffff:
251 count = min(count, (size_t)(0xc0000 - pos));
252 iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
254 rsrc = VGA_RSRC_LEGACY_MEM;
257 case 0x3b0 ... 0x3bb:
258 count = min(count, (size_t)(0x3bc - pos));
259 iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1);
261 rsrc = VGA_RSRC_LEGACY_IO;
264 case 0x3c0 ... 0x3df:
265 count = min(count, (size_t)(0x3e0 - pos));
266 iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1);
268 rsrc = VGA_RSRC_LEGACY_IO;
278 ret = vga_get_interruptible(vdev->pdev, rsrc);
280 is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
284 done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);
286 vga_put(vdev->pdev, rsrc);
288 is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
296 static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
298 struct vfio_pci_ioeventfd *ioeventfd = opaque;
300 switch (ioeventfd->count) {
302 vfio_iowrite8(ioeventfd->data, ioeventfd->addr);
305 vfio_iowrite16(ioeventfd->data, ioeventfd->addr);
308 vfio_iowrite32(ioeventfd->data, ioeventfd->addr);
312 vfio_iowrite64(ioeventfd->data, ioeventfd->addr);
320 long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
321 uint64_t data, int count, int fd)
323 struct pci_dev *pdev = vdev->pdev;
324 loff_t pos = offset & VFIO_PCI_OFFSET_MASK;
325 int ret, bar = VFIO_PCI_OFFSET_TO_INDEX(offset);
326 struct vfio_pci_ioeventfd *ioeventfd;
328 /* Only support ioeventfds into BARs */
329 if (bar > VFIO_PCI_BAR5_REGION_INDEX)
332 if (pos + count > pci_resource_len(pdev, bar))
335 /* Disallow ioeventfds working around MSI-X table writes */
336 if (bar == vdev->msix_bar &&
337 !(pos + count <= vdev->msix_offset ||
338 pos >= vdev->msix_offset + vdev->msix_size))
346 ret = vfio_pci_setup_barmap(vdev, bar);
350 mutex_lock(&vdev->ioeventfds_lock);
352 list_for_each_entry(ioeventfd, &vdev->ioeventfds_list, next) {
353 if (ioeventfd->pos == pos && ioeventfd->bar == bar &&
354 ioeventfd->data == data && ioeventfd->count == count) {
356 vfio_virqfd_disable(&ioeventfd->virqfd);
357 list_del(&ioeventfd->next);
358 vdev->ioeventfds_nr--;
373 if (vdev->ioeventfds_nr >= VFIO_PCI_IOEVENTFD_MAX) {
378 ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL);
384 ioeventfd->addr = vdev->barmap[bar] + pos;
385 ioeventfd->data = data;
386 ioeventfd->pos = pos;
387 ioeventfd->bar = bar;
388 ioeventfd->count = count;
390 ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
391 NULL, NULL, &ioeventfd->virqfd, fd);
397 list_add(&ioeventfd->next, &vdev->ioeventfds_list);
398 vdev->ioeventfds_nr++;
401 mutex_unlock(&vdev->ioeventfds_lock);