/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA remapping and interrupt remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/iommu_table.h>

#define PREFIX "DMAR: "

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

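/*
 * Resolve one ACPI device-scope entry to a struct pci_dev: start at the
 * entry's bus, walk the (device, function) path entries, descending through
 * any intermediate bridges, and return the device found at the end of the
 * path (with a reference held via pci_get_slot()).
 */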
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING PREFIX
			       "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

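/*
 * Parse all device-scope entries in [start, end): a first pass counts the
 * endpoint/bridge entries so a pci_dev pointer array can be allocated, and
 * a second pass resolves each entry with dmar_parse_one_dev_scope().
 */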
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been made
	 * with the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

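/*
 * Resolve the device scopes for all DRHD (and, with CONFIG_DMAR, RMRR and
 * ATSR) units found at boot.  The outcome is cached in
 * dmar_dev_scope_initialized so repeat callers get the first result back.
 */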
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				goto fail;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				goto fail;
		}
	}
#endif

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

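/*
 * Sanity-check the DMAR table before committing to remapping: reject DRHD
 * entries that report a zero register base or whose capability registers
 * read back as all ones, and, when DMA remapping is built in, disable it
 * in that case.
 */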
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n", drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}


int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

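/*
 * Each submission below occupies two slots in the invalidation queue: the
 * caller's descriptor followed by a wait descriptor whose status write-back
 * (QI_DONE) tells us the hardware has processed everything up to that point.
 */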
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is to
		 * avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
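/*
 * Primary fault handler: walk the fault recording registers (each
 * PRIMARY_FAULT_REG_LEN bytes), decode reason/type/source-id/address for
 * every pending record, log it and clear the fault bit before moving on.
 */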
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
IOMMU_INIT_POST(detect_intel_iommu);