/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
};
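
/*
 * A dmar_res_callback is a small dispatch table indexed by DMAR structure
 * type: a caller fills in cb[]/arg[] for the types it cares about and hands
 * the struct to dmar_walk_remapping_entries(). Rough usage sketch (types
 * other than DRHD left unhandled and silently ignored):
 *
 *	struct dmar_res_callback cb = {
 *		.ignore_unhandled = true,
 *		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
 *	};
 */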
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
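/*
 * One bit per possible DMAR unit; a set bit means the corresponding
 * "dmar%d" sequence ID is in use (see dmar_alloc_seq_id() below).
 */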
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}
void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}
/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/*
	 * Ignore devices that have a domain number higher than what can
	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
	 */
	if (pci_domain_nr(dev->bus) > U16_MAX)
		return NULL;

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(info->path[0]);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}
static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}
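
/*
 * Match the PCI path recorded in @info (root bus down to the device)
 * against the path encoded in a DMAR device scope entry: same starting
 * bus, same depth, and the same device/function at every hop, with a
 * fallback for firmware that records only the terminal device.
 */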
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus              == info->path[i].bus &&
	    path[0].device   == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers. However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header. NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}
static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		pr_warn(FW_BUG
			"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}
#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	pr_warn(FW_BUG
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif
static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
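
/*
 * A DMAR table is a fixed header followed by a packed sequence of
 * variable-length remapping structures (DRHD, RMRR, ATSR, RHSA, ANDD).
 * Each structure starts with an acpi_dmar_header carrying its type and
 * length, so the table can be walked by repeatedly advancing by
 * iter->length, as done below.
 */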
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	int ret = 0;
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end && ret == 0; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			ret = -EINVAL;
		}
	}

	return ret;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int ret = 0;
	int drhd_count = 0;
	struct dmar_res_callback cb = {
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}
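
/*
 * Walk the ANDD (ACPI name-space device declaration) entries of the DMAR
 * table, resolve each declared object name to an acpi_device, and register
 * it in the device scope of the DRHD unit that claims its enumeration ID.
 */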
static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}
int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	pr_warn_once(FW_BUG
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}
static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}
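
/*
 * Early detection: map the DMAR table, sanity-check every DRHD's register
 * base via dmar_validate_one_drhd() above, and if anything usable is found
 * hook intel_iommu_init() into the x86 IOMMU init path.
 */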
int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					    &validate_drhd_cb);
	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers. Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	if (cap_sagaw(iommu->cap) == 0) {
		pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
			iommu->name);
		drhd->ignored = 1;
	}

	if (!drhd->ignored) {
		agaw = iommu_calculate_agaw(iommu);
		if (agaw < 0) {
			pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
			       iommu->seq_id);
			drhd->ignored = 1;
		}
	}
	if (!drhd->ignored) {
		msagaw = iommu_calculate_max_sagaw(iommu);
		if (msagaw < 0) {
			pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			       iommu->seq_id);
			drhd->ignored = 1;
			agaw = -1;
		}
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	if (intel_iommu_enabled && !drhd->ignored) {
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);
		if (IS_ERR(iommu->iommu_dev)) {
			err = PTR_ERR(iommu->iommu_dev);
			goto err_unmap;
		}
	}

	drhd->iommu = iommu;
	iommu->drhd = drhd;

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
	if (intel_iommu_enabled && !iommu->drhd->ignored)
		iommu_device_destroy(iommu->iommu_dev);

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}
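
/*
 * Queued invalidation uses a single page ring of QI_LENGTH descriptors
 * (qi->desc) plus a parallel software status array (qi->desc_status).
 * Requests are submitted in pairs: the actual invalidation descriptor
 * followed by a wait descriptor that asks the hardware to write QI_DONE
 * into the status slot, which is what qi_submit_sync() below spins on.
 */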
/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
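
/*
 * Rough caller sketch for qi_submit_sync() below, mirroring
 * qi_global_iec() and the qi_flush_*() helpers that follow: build a
 * qi_desc, then submit it synchronously.
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_IEC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 */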
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
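
/*
 * For page-selective IOTLB invalidation the address/mask pair encodes a
 * naturally aligned range of 2^size_order pages: QI_IOTLB_AM() stores
 * size_order in the address-mask field, so size_order = 0 invalidates a
 * single 4KiB page, 1 an aligned pair of pages, and so on.
 */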
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);

	qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is required to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);

	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
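/*
 * Each primary fault record is 16 bytes: a 64-bit faulting address at
 * offset 0, the source-id in the dword at offset 8, and the fault
 * reason/type plus the F (fault) bit in the dword at offset 12 - hence
 * the +8/+12 reads and the DMA_FRCD_F writeback below.
 */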
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
	bool ratelimited;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/* Disable printing, simply clear the fault when ratelimited */
	ratelimited = !__ratelimit(&rs);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && !ratelimited)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		if (!ratelimited) {
			fault_reason = dma_frcd_fault_reason(data);
			type = dma_frcd_type(data);

			data = readl(iommu->reg + reg +
				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
			source_id = dma_frcd_source_id(data);

			guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
			guest_addr = dma_frcd_page_addr(guest_addr);
		}

		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		if (!ratelimited)
			dmar_fault_do_one(iommu, type, fault_reason,
					  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}
static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static u8 dmar_hp_uuid[] = {
	/* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
	/* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3
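
/*
 * Each _DSM function returns a buffer of DMAR structures of one type:
 * DRHD for DMAR_DSM_FUNC_DRHD, ATSR for DMAR_DSM_FUNC_ATSR and RHSA for
 * DMAR_DSM_FUNC_RHSA (see the res_type[] mapping in
 * dmar_walk_dsm_resource() below).
 */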
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
}

static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}
static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}
static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}
static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}
static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}

int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}