/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA Remapping Reporting
 * (DMAR) ACPI table.
 *
 * These routines are used by both DMA remapping and interrupt remapping.
 */
28
29#include <linux/pci.h>
30#include <linux/dmar.h>
Kay, Allen M38717942008-09-09 18:37:29 +030031#include <linux/iova.h>
32#include <linux/intel-iommu.h>
Suresh Siddhafe962e92008-07-10 11:16:42 -070033#include <linux/timer.h>
Suresh Siddha0ac24912009-03-16 17:04:54 -070034#include <linux/irq.h>
35#include <linux/interrupt.h>
Shane Wang69575d32009-09-01 18:25:07 -070036#include <linux/tboot.h>
Len Browneb27cae2009-07-06 23:40:19 -040037#include <linux/dmi.h>
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070038
Len Browna192a952009-07-28 16:45:54 -040039#define PREFIX "DMAR: "
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070040
/*
 * No locks are needed: the DMA remapping hardware unit list is
 * constructed at boot time, and hotplug of these units is not
 * supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

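/*
 * A device scope entry names a device below a PCI segment/bus as a
 * chain of (device, function) hops: struct acpi_dmar_device_scope is
 * followed by zero or more struct acpi_dmar_pci_path entries, one per
 * bridge crossed on the way to the endpoint or bridge being described.
 * The helper below walks that chain and resolves it to a pci_dev.
 */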
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

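/*
 * Walk the device scope entries between start and end twice: first to
 * count the endpoint/bridge entries so the pci_dev array can be sized,
 * then to resolve each counted entry via dmar_parse_one_dev_scope().
 */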
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

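/*
 * An ATSR entry tells us which root ports allow Address Translation
 * Services for the devices below them. Find the ATSR matching the
 * device's segment, then walk up from the device to its root port:
 * the device may use ATS if that root port is in the ATSR's scope,
 * or if the ATSR has its "all ports" flag set.
 */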
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	/*
	 * drhd is past the end of the list here, so report the address
	 * the RHSA itself referred to.
	 */
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

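/*
 * A DMAR table is a fixed struct acpi_table_dmar (host address width,
 * flags) followed by a sequence of variable-length remapping structures,
 * each beginning with a struct acpi_dmar_header giving its type (DRHD,
 * RMRR, ATSR, RHSA, ...) and length. parse_dmar_table() below walks the
 * structures in order, advancing by entry_header->length each time.
 */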
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again here; the earlier dmar_tbl mapping may have used a
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
			       "Unknown DMAR structure type %d\n",
			       entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

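/*
 * Second-stage parsing: resolve the device scope lists recorded by
 * parse_dmar_table() into actual pci_dev pointers. Units whose
 * scopes cannot be resolved are unlinked and freed by the
 * *_parse_dev() helpers above.
 */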
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

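/*
 * Sanity-check every DRHD in the table before trusting it: a register
 * base of zero, or capability/extended-capability registers that read
 * back as all ones, indicate broken firmware. On failure, DMA
 * remapping is disabled entirely.
 */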
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk(KERN_ERR "IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

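/*
 * Map a DRHD's register set, read its capability registers, and hang
 * the resulting struct intel_iommu off the dmar_drhd_unit. The initial
 * VTD_PAGE_SIZE mapping is grown if the fault and IOTLB register
 * offsets indicate the register set spans more than one page.
 */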
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

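/*
 * Check the fault status register for queued-invalidation errors.
 * An Invalidation Queue Error (IQE) means the descriptor at the head
 * is malformed: overwrite it with the wait descriptor so the queue can
 * drain. An Invalidation Time-out Error (ITE) aborts all pending wait
 * descriptors, and the caller must restart. An Invalidation Completion
 * Error (ICE) is simply cleared.
 */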
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave interrupts disabled to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This
		 * avoids a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

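/*
 * The helpers below each build a single invalidation descriptor, for
 * the context cache, the IOTLB, or a device IOTLB (for ATS-capable
 * devices), and submit it synchronously via qi_submit_sync().
 */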
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

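/*
 * Translate a hardware fault reason code into a human-readable string.
 * Codes at or above 0x20 index the interrupt-remapping reasons; lower
 * codes index the DMA-remapping reasons.
 */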
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

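/*
 * Fault IRQ handler: walk the hardware fault recording registers,
 * log and clear each recorded fault, then clear the remaining summary
 * bits in the fault status register. The register lock is dropped
 * around the logging in dmar_fault_do_one().
 */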
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}