/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this.  We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped.  The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory.  We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"
static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");
static bool disable_hugepages;
module_param_named(disable_hugepages,
		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");
static unsigned int dma_entry_limit __read_mostly = U16_MAX;
module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
MODULE_PARM_DESC(dma_entry_limit,
		 "Maximum number of user DMA mappings per container (default 65535).");
struct vfio_iommu {
	struct list_head	domain_list;
	struct mutex		lock;
	struct rb_root		dma_list;
	unsigned int		dma_avail;
	bool			v2;
	bool			nesting;
};
struct vfio_domain {
	struct iommu_domain	*domain;
	struct list_head	next;
	struct list_head	group_list;
	int			prot;		/* IOMMU_CACHE */
	bool			fgsp;		/* Fine-grained super pages */
};
struct vfio_dma {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		vaddr;		/* Process virtual addr */
	size_t			size;		/* Map size (bytes) */
	int			prot;		/* IOMMU_READ/WRITE */
};
struct vfio_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
};
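
/*
 * Sketch of how these structures relate (added for illustration, not part
 * of the original source): a vfio_iommu is the per-container state; it
 * owns a list of vfio_domains (each wrapping one iommu_domain and the
 * vfio_groups attached to it) and an rb-tree of non-overlapping vfio_dma
 * ranges keyed by iova:
 *
 *	vfio_iommu
 *	  +-- domain_list -> vfio_domain -> group_list -> vfio_group(s)
 *	  +-- dma_list (rb_root) -> vfio_dma { iova, vaddr, size, prot }
 */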
/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
				      dma_addr_t start, size_t size)
{
	struct rb_node *node = iommu->dma_list.rb_node;

	while (node) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;
	}

	return NULL;
}
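
/*
 * Example (illustrative, not from the original source): tracked ranges
 * never overlap, so a size of 0 acts as a point query.  With a mapping at
 * iova 0x100000 of size 0x200000, vfio_find_dma(iommu, 0x180000, 0)
 * returns that vfio_dma, while vfio_find_dma(iommu, 0x300000, 0) returns
 * NULL.  Note that a zero-size query at a mapping's exact start also
 * misses it (start + 0 <= dma->iova descends left), which the v2
 * bisection checks in vfio_dma_do_unmap() below rely on.
 */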
static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
	struct vfio_dma *dma;

	while (*link) {
		parent = *link;
		dma = rb_entry(parent, struct vfio_dma, node);

		if (new->iova + new->size <= dma->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &iommu->dma_list);
}
static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
	rb_erase(&old->node, &iommu->dma_list);
}
static int vfio_lock_acct(long npage, bool *lock_cap)
{
	int ret = 0;

	if (!npage)
		return 0;

	if (!current->mm)
		return -ESRCH; /* process exited */

	ret = down_write_killable(&current->mm->mmap_sem);
	if (!ret) {
		if (npage > 0) {
			if (lock_cap ? !*lock_cap : !capable(CAP_IPC_LOCK)) {
				unsigned long limit;

				limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

				if (current->mm->locked_vm + npage > limit)
					ret = -ENOMEM;
			}
		}

		if (!ret)
			current->mm->locked_vm += npage;

		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		bool reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_head(tail);
		reserved = !!(PageReserved(head));
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we have to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}
static int put_pfn(unsigned long pfn, int prot)
{
	if (!is_invalid_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (prot & IOMMU_WRITE)
			SetPageDirty(page);
		put_page(page);
		return 1;
	}
	return 0;
}
static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
			    unsigned long vaddr, unsigned long *pfn,
			    bool write_fault)
{
	int ret;

	ret = follow_pfn(vma, vaddr, pfn);
	if (ret) {
		bool unlocked = false;

		ret = fixup_user_fault(NULL, mm, vaddr,
				       FAULT_FLAG_REMOTE |
				       (write_fault ? FAULT_FLAG_WRITE : 0),
				       &unlocked);
		if (unlocked)
			return -EAGAIN;

		if (ret)
			return ret;

		ret = follow_pfn(vma, vaddr, pfn);
	}

	return ret;
}
static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
{
	struct page *page[1];
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) {
		*pfn = page_to_pfn(page[0]);
		return 0;
	}

	down_read(&current->mm->mmap_sem);

retry:
	vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);

	if (vma && vma->vm_flags & VM_PFNMAP) {
		ret = follow_fault_pfn(vma, current->mm, vaddr, pfn, prot & IOMMU_WRITE);
		if (ret == -EAGAIN)
			goto retry;

		if (!ret && !is_invalid_reserved_pfn(*pfn))
			ret = -EFAULT;
	}

	up_read(&current->mm->mmap_sem);

	return ret;
}
/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
static long vfio_pin_pages(unsigned long vaddr, long npage,
			   int prot, unsigned long *pfn_base)
{
	unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	bool lock_cap = capable(CAP_IPC_LOCK);
	long ret, i = 1;
	bool rsvd;

	if (!current->mm)
		return -ENODEV;

	ret = vaddr_get_pfn(vaddr, prot, pfn_base);
	if (ret)
		return ret;

	rsvd = is_invalid_reserved_pfn(*pfn_base);

	if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
		put_pfn(*pfn_base, prot);
		pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
			limit << PAGE_SHIFT);
		return -ENOMEM;
	}

	if (unlikely(disable_hugepages))
		goto out;

	/* Lock all the consecutive pages from pfn_base */
	for (vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
		ret = vaddr_get_pfn(vaddr, prot, &pfn);
		if (ret)
			break;

		if (pfn != *pfn_base + i ||
		    rsvd != is_invalid_reserved_pfn(pfn)) {
			put_pfn(pfn, prot);
			break;
		}

		if (!rsvd && !lock_cap &&
		    current->mm->locked_vm + i + 1 > limit) {
			put_pfn(pfn, prot);
			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
				__func__, limit << PAGE_SHIFT);
			break;
		}
	}

out:
	if (!rsvd) {
		ret = vfio_lock_acct(i, &lock_cap);
		if (ret) {
			for (pfn = *pfn_base ; i ; pfn++, i--)
				put_pfn(pfn, prot);
			return ret;
		}
	}

	return i;
}
static long vfio_unpin_pages(unsigned long pfn, long npage,
			     int prot, bool do_accounting)
{
	unsigned long unlocked = 0;
	long i;

	for (i = 0; i < npage; i++)
		unlocked += put_pfn(pfn++, prot);

	if (do_accounting)
		vfio_lock_acct(-unlocked, NULL);

	return unlocked;
}
static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
	struct vfio_domain *domain, *d;
	long unlocked = 0;

	if (!dma->size)
		return;
	/*
	 * We use the IOMMU to track the physical addresses, otherwise we'd
	 * need a much more complicated tracking system.  Unfortunately that
	 * means we need to use one of the iommu domains to figure out the
	 * pfns to unpin.  The rest need to be unmapped in advance so we have
	 * no iommu translations remaining when the pages are unpinned.
	 */
	domain = d = list_first_entry(&iommu->domain_list,
				      struct vfio_domain, next);

	list_for_each_entry_continue(d, &iommu->domain_list, next) {
		iommu_unmap(d->domain, dma->iova, dma->size);
		cond_resched();
	}

	while (iova < end) {
		size_t unmapped, len;
		phys_addr_t phys, next;

		phys = iommu_iova_to_phys(domain->domain, iova);
		if (WARN_ON(!phys)) {
			iova += PAGE_SIZE;
			continue;
		}

		/*
		 * To optimize for fewer iommu_unmap() calls, each of which
		 * may require hardware cache flushing, try to find the
		 * largest contiguous physical memory chunk to unmap.
		 */
		for (len = PAGE_SIZE;
		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
			next = iommu_iova_to_phys(domain->domain, iova + len);
			if (next != phys + len)
				break;
		}

		unmapped = iommu_unmap(domain->domain, iova, len);
		if (WARN_ON(!unmapped))
			break;

		unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
					     unmapped >> PAGE_SHIFT,
					     dma->prot, false);
		iova += unmapped;

		cond_resched();
	}

	vfio_lock_acct(-unlocked, NULL);
}
static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	vfio_unmap_unpin(iommu, dma);
	vfio_unlink_dma(iommu, dma);
	kfree(dma);
	iommu->dma_avail++;
}
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	unsigned long bitmap = ULONG_MAX;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next)
		bitmap &= domain->domain->pgsize_bitmap;
	mutex_unlock(&iommu->lock);

	/*
	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
	 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
	 * That way the user will be able to map/unmap buffers whose size/
	 * start address is aligned with PAGE_SIZE.  Pinning code uses that
	 * granularity while iommu driver can use the sub-PAGE_SIZE size
	 * directly.
	 */
	if (bitmap & ~PAGE_MASK) {
		bitmap &= PAGE_MASK;
		bitmap |= PAGE_SIZE;
	}

	return bitmap;
}
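
/*
 * Example (illustrative, not from the original source): with
 * PAGE_SIZE = 4K and every domain advertising 4K | 2M | 1G, the bitmap is
 * returned unchanged.  If a domain also advertised sub-4K sizes, those
 * bits would be masked off and replaced with PAGE_SIZE, so userspace only
 * ever sees PAGE_SIZE-or-larger granularity.
 */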
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
			     struct vfio_iommu_type1_dma_unmap *unmap)
{
	uint64_t mask;
	struct vfio_dma *dma;
	size_t unmapped = 0;
	int ret = 0;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	if (unmap->iova & mask)
		return -EINVAL;
	if (!unmap->size || unmap->size & mask)
		return -EINVAL;

	WARN_ON(mask & PAGE_MASK);

	mutex_lock(&iommu->lock);
	/*
	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
	 * avoid tracking individual mappings.  This means that the granularity
	 * of the original mapping was lost and the user was allowed to attempt
	 * to unmap any range.  Depending on the contiguousness of physical
	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
	 * or may not have worked.  We only guaranteed unmap granularity
	 * matching the original mapping; even though it was untracked here,
	 * the original mappings are reflected in IOMMU mappings.  This
	 * resulted in a couple of unusual behaviors.  First, if a range is not
	 * able to be unmapped, ex. a set of 4k pages that was mapped as a
	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
	 * a zero sized unmap.  Also, if an unmap request overlaps the first
	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
	 * This also returns success and the returned unmap size reflects the
	 * actual size unmapped.
	 *
	 * We attempt to maintain compatibility with this "v1" interface, but
	 * we take control out of the hands of the IOMMU.  Therefore, an unmap
	 * request offset from the beginning of the original mapping will
	 * return success with zero sized unmap.  And an unmap request covering
	 * the first iova of a mapping will unmap the entire range.
	 *
	 * The v2 version of this interface intends to be more deterministic.
	 * Unmap requests must fully cover previous mappings.  Multiple
	 * mappings may still be unmapped by specifying large ranges, but there
	 * must not be any previous mappings bisected by the range.  An error
	 * will be returned if these conditions are not met.  The v2 interface
	 * will only return success and a size of zero if there were no
	 * mappings within the range.
	 */
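	/*
	 * Concrete example (added for illustration, not in the original
	 * source): suppose userspace mapped 2M at iova 0x200000 with a
	 * single VFIO_IOMMU_MAP_DMA call.  Under v2, an unmap of
	 * [0x200000, +2M) removes the whole mapping; an unmap of
	 * [0x300000, +1M) bisects it and fails with -EINVAL; an unmap of
	 * [0x400000, +2M) covers no mapping at all and "succeeds" with
	 * unmap->size == 0.
	 */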
	if (iommu->v2) {
		dma = vfio_find_dma(iommu, unmap->iova, 0);
		if (dma && dma->iova != unmap->iova) {
			ret = -EINVAL;
			goto unlock;
		}
		dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
		if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
		if (!iommu->v2 && unmap->iova > dma->iova)
			break;
		unmapped += dma->size;
		vfio_remove_dma(iommu, dma);
	}

unlock:
	mutex_unlock(&iommu->lock);

	/* Report how much was unmapped */
	unmap->size = unmapped;

	return ret;
}
/*
 * Turns out AMD IOMMU has a page table bug where it won't map large pages
 * to a region that previously mapped smaller pages.  This should be fixed
 * soon, so this is just a temporary workaround to break mappings down into
 * PAGE_SIZE.  Better to map smaller pages than nothing.
 */
static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	long i;
	int ret = 0;

	for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
		ret = iommu_map(domain->domain, iova,
				(phys_addr_t)pfn << PAGE_SHIFT,
				PAGE_SIZE, prot | domain->prot);
		if (ret)
			break;
	}

	for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
		iommu_unmap(domain->domain, iova, PAGE_SIZE);

	return ret;
}
static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	struct vfio_domain *d;
	int ret;

	list_for_each_entry(d, &iommu->domain_list, next) {
		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
				npage << PAGE_SHIFT, prot | d->prot);
		if (ret) {
			if (ret != -EBUSY ||
			    map_try_harder(d, iova, pfn, npage, prot))
				goto unwind;
		}

		cond_resched();
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);

	return ret;
}
static int vfio_dma_do_map(struct vfio_iommu *iommu,
			   struct vfio_iommu_type1_dma_map *map)
{
	dma_addr_t iova = map->iova;
	unsigned long vaddr = map->vaddr;
	size_t size = map->size;
	long npage;
	int ret = 0, prot = 0;
	uint64_t mask;
	struct vfio_dma *dma;
	unsigned long pfn;

	/* Verify that none of our __u64 fields overflow */
	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
		return -EINVAL;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	WARN_ON(mask & PAGE_MASK);

	/* READ/WRITE from device perspective */
	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
		prot |= IOMMU_READ;

	if (!prot || !size || (size | iova | vaddr) & mask)
		return -EINVAL;

	/* Don't allow IOVA or virtual address wrap */
	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
		return -EINVAL;

	mutex_lock(&iommu->lock);

	if (vfio_find_dma(iommu, iova, size)) {
		mutex_unlock(&iommu->lock);
		return -EEXIST;
	}

	if (!iommu->dma_avail) {
		mutex_unlock(&iommu->lock);
		return -ENOSPC;
	}

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		mutex_unlock(&iommu->lock);
		return -ENOMEM;
	}

	iommu->dma_avail--;
	dma->iova = iova;
	dma->vaddr = vaddr;
	dma->prot = prot;

	/* Insert zero-sized and grow as we map chunks of it */
	vfio_link_dma(iommu, dma);

	while (size) {
		/* Pin a contiguous chunk of memory */
		npage = vfio_pin_pages(vaddr + dma->size,
				       size >> PAGE_SHIFT, prot, &pfn);
		if (npage <= 0) {
			WARN_ON(!npage);
			ret = (int)npage;
			break;
		}

		/* Map it! */
		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
		if (ret) {
			vfio_unpin_pages(pfn, npage, prot, true);
			break;
		}

		size -= npage << PAGE_SHIFT;
		dma->size += npage << PAGE_SHIFT;
	}

	if (ret)
		vfio_remove_dma(iommu, dma);

	mutex_unlock(&iommu->lock);
	return ret;
}
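
/*
 * Illustrative userspace sketch (added for clarity, not part of the
 * original source): a caller with a container fd set to
 * VFIO_TYPE1v2_IOMMU might establish a mapping like this, assuming "buf"
 * is a page-aligned allocation of "len" bytes (a page multiple):
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova  = 0x100000,
 *		.size  = len,
 *	};
 *	if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map))
 *		perror("VFIO_IOMMU_MAP_DMA");
 */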
static int vfio_bus_type(struct device *dev, void *data)
{
	struct bus_type **bus = data;

	if (*bus && *bus != dev->bus)
		return -EINVAL;

	*bus = dev->bus;

	return 0;
}
static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_domain *d;
	struct rb_node *n;
	int ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
	n = rb_first(&iommu->dma_list);

	/* If there's not a domain, there better not be any mappings */
	if (WARN_ON(n && !d))
		return -EINVAL;

	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		dma = rb_entry(n, struct vfio_dma, node);
		iova = dma->iova;

		while (iova < dma->iova + dma->size) {
			phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
			size_t size;

			if (WARN_ON(!phys)) {
				iova += PAGE_SIZE;
				continue;
			}

			size = PAGE_SIZE;

			while (iova + size < dma->iova + dma->size &&
			       phys + size == iommu_iova_to_phys(d->domain,
								 iova + size))
				size += PAGE_SIZE;

			ret = iommu_map(domain->domain, iova, phys,
					size, dma->prot | domain->prot);
			if (ret)
				return ret;

			iova += size;
		}
	}

	return 0;
}
/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it.  This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
{
	struct page *pages;
	int ret, order = get_order(PAGE_SIZE * 2);

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return;

	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
			IOMMU_READ | IOMMU_WRITE | domain->prot);
	if (!ret) {
		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);

		if (unmapped == PAGE_SIZE)
			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
		else
			domain->fgsp = true;
	}

	__free_pages(pages, order);
}
static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group, *g;
	struct vfio_domain *domain, *d;
	struct bus_type *bus = NULL;
	int ret;

	mutex_lock(&iommu->lock);

	list_for_each_entry(d, &iommu->domain_list, next) {
		list_for_each_entry(g, &d->group_list, next) {
			if (g->iommu_group != iommu_group)
				continue;

			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!group || !domain) {
		ret = -ENOMEM;
		goto out_free;
	}

	group->iommu_group = iommu_group;

	/* Determine bus_type in order to allocate a domain */
	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
	if (ret)
		goto out_free;

	domain->domain = iommu_domain_alloc(bus);
	if (!domain->domain) {
		ret = -EIO;
		goto out_free;
	}

	if (iommu->nesting) {
		int attr = 1;

		ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
					    &attr);
		if (ret)
			goto out_domain;
	}

	ret = iommu_attach_group(domain->domain, iommu_group);
	if (ret)
		goto out_domain;

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	if (!allow_unsafe_interrupts &&
	    !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
			__func__);
		ret = -EPERM;
		goto out_detach;
	}

	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->prot |= IOMMU_CACHE;

	/*
	 * Try to match an existing compatible domain.  We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->prot == domain->prot) {
			iommu_detach_group(domain->domain, iommu_group);
			if (!iommu_attach_group(d->domain, iommu_group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				mutex_unlock(&iommu->lock);
				return 0;
			}

			ret = iommu_attach_group(domain->domain, iommu_group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	list_add(&domain->next, &iommu->domain_list);

	mutex_unlock(&iommu->lock);

	return 0;

out_detach:
	iommu_detach_group(domain->domain, iommu_group);
out_domain:
	iommu_domain_free(domain->domain);
out_free:
	kfree(domain);
	kfree(group);
	mutex_unlock(&iommu->lock);
	return ret;
}
static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	while ((node = rb_first(&iommu->dma_list)))
		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}
static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_group *group;

	mutex_lock(&iommu->lock);

	list_for_each_entry(domain, &iommu->domain_list, next) {
		list_for_each_entry(group, &domain->group_list, next) {
			if (group->iommu_group != iommu_group)
				continue;

			iommu_detach_group(domain->domain, iommu_group);
			list_del(&group->next);
			kfree(group);
			/*
			 * Group ownership provides privilege, if the group
			 * list is empty, the domain goes away.  If it's the
			 * last domain, then all the mappings go away too.
			 */
			if (list_empty(&domain->group_list)) {
				if (list_is_singular(&iommu->domain_list))
					vfio_iommu_unmap_unpin_all(iommu);
				iommu_domain_free(domain->domain);
				list_del(&domain->next);
				kfree(domain);
			}
			goto done;
		}
	}

done:
	mutex_unlock(&iommu->lock);
}
static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		iommu->nesting = true;
		/* fall through: nesting implies the v2 interface */
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	iommu->dma_list = RB_ROOT;
	iommu->dma_avail = dma_entry_limit;
	mutex_init(&iommu->lock);

	return iommu;
}
static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain, *domain_tmp;
	struct vfio_group *group, *group_tmp;

	vfio_iommu_unmap_unpin_all(iommu);

	list_for_each_entry_safe(domain, domain_tmp,
				 &iommu->domain_list, next) {
		list_for_each_entry_safe(group, group_tmp,
					 &domain->group_list, next) {
			iommu_detach_group(domain->domain, group->iommu_group);
			list_del(&group->next);
			kfree(group);
		}
		iommu_domain_free(domain->domain);
		list_del(&domain->next);
		kfree(domain);
	}

	kfree(iommu);
}
static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	int ret = 1;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next) {
		if (!(domain->prot & IOMMU_CACHE)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return ret;
}
static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
		case VFIO_TYPE1v2_IOMMU:
		case VFIO_TYPE1_NESTING_IOMMU:
			return 1;
		case VFIO_DMA_CC_IOMMU:
			if (!iommu)
				return 0;
			return vfio_domains_have_iommu_cache(iommu);
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
		struct vfio_iommu_type1_info info;

		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_IOMMU_INFO_PGSIZES;

		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
		struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&map, (void __user *)arg, minsz))
			return -EFAULT;

		if (map.argsz < minsz || map.flags & ~mask)
			return -EINVAL;

		return vfio_dma_do_map(iommu, &map);

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
		struct vfio_iommu_type1_dma_unmap unmap;
		long ret;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

		if (copy_from_user(&unmap, (void __user *)arg, minsz))
			return -EFAULT;

		if (unmap.argsz < minsz || unmap.flags)
			return -EINVAL;

		ret = vfio_dma_do_unmap(iommu, &unmap);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &unmap, minsz) ?
			-EFAULT : 0;
	}

	return -ENOTTY;
}
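
/*
 * Illustrative userspace sketch (added for clarity, not part of the
 * original source): querying the supported IOVA page sizes through
 * VFIO_IOMMU_GET_INFO, assuming container_fd is an open, initialized
 * container:
 *
 *	struct vfio_iommu_type1_info info = { .argsz = sizeof(info) };
 *
 *	if (!ioctl(container_fd, VFIO_IOMMU_GET_INFO, &info) &&
 *	    (info.flags & VFIO_IOMMU_INFO_PGSIZES))
 *		printf("iova pgsizes bitmap: 0x%llx\n",
 *		       (unsigned long long)info.iova_pgsizes);
 */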
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name		= "vfio-iommu-type1",
	.owner		= THIS_MODULE,
	.open		= vfio_iommu_type1_open,
	.release	= vfio_iommu_type1_release,
	.ioctl		= vfio_iommu_type1_ioctl,
	.attach_group	= vfio_iommu_type1_attach_group,
	.detach_group	= vfio_iommu_type1_detach_group,
};
static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);