/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

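/*
 * dmar_parse_one_dev_scope - resolve one ACPI device-scope entry to a pci_dev
 *
 * Walks the PCI path encoded after the scope header, starting from the
 * bus number named in the scope entry and descending through bridges
 * one (device, function) step at a time.  On success *dev holds a
 * reference to the final device; if the device does not exist, *dev is
 * set to NULL and 0 is returned (some BIOSes list stale entries).
 * Returns -EINVAL when the device found does not match the scope type.
 */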
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		/* print before dropping the reference; pci_name() must
		 * not be called on a pdev we may have just freed */
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

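/*
 * dmar_find_matched_atsr_unit - tell whether ATS is allowed for @dev
 *
 * Returns 1 if @dev sits below a PCIe root port listed in an ATSR
 * device scope for the device's segment (or if the matching ATSR has
 * INCLUDE_ALL set), and 0 otherwise.
 */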
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	/* the loop cursor is no longer valid here; report the address
	 * from the RHSA entry itself */
	WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
	     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
	     (unsigned long long)rhsa->base_address,
	     dmi_get_system_info(DMI_BIOS_VENDOR),
	     dmi_get_system_info(DMI_BIOS_VERSION),
	     dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, since the earlier dmar_tbl mapping could have
	 * been done via the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
			       "Unknown DMAR structure type %d\n",
			       entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

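/*
 * dmar_dev_scope_init - resolve recorded device scopes to pci_dev's
 *
 * Walks the DRHD (and, under CONFIG_DMAR, RMRR/ATSR) units registered
 * at table-parse time and resolves their ACPI device-scope entries
 * into pci_dev pointers; units whose scopes fail to resolve are
 * unlinked and freed.  Must run after PCI enumeration, since it relies
 * on pci_get_slot() finding the devices.
 */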
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_ONCE(1, "Your BIOS is broken; DMAR reported at address %llx%s!\n"
		  "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		  addr, message,
		  dmi_get_system_info(DMI_BIOS_VENDOR),
		  dmi_get_system_info(DMI_BIOS_VERSION),
		  dmi_get_system_info(DMI_PRODUCT_VERSION));
}

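/*
 * check_zero_address - sanity check the DRHD entries before use
 *
 * Rejects tables that report a DRHD at physical address 0, and probes
 * each unit's capability registers via early_ioremap(); all-ones reads
 * mean the registers are not really there.  Returns 1 if the table
 * looks usable, otherwise 0 (and disables DMAR when CONFIG_DMAR is set).
 */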
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk(KERN_ERR "IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

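/*
 * alloc_iommu - map a DRHD's registers and set up its struct intel_iommu
 *
 * Maps the register window at drhd->reg_base_addr (growing the mapping
 * when the capability registers say it is larger than one page), reads
 * the CAP/ECAP registers, and, under CONFIG_DMAR, computes the AGAW
 * values.  On success drhd->iommu points at the new unit and 0 is
 * returned.
 */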
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

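/*
 * qi_check_fault - check the invalidation queue for faults on @index
 *
 * IQE (invalid descriptor): if the bad descriptor is ours, overwrite
 * it with the wait descriptor so the queue can advance, and return
 * -EINVAL.  ITE (invalidation time-out): mark the pending wait
 * descriptors QI_ABORT and return -EAGAIN so qi_submit_sync() retries.
 * ICE is simply cleared.  Returns 0 when there is nothing to handle.
 */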
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context could wait indefinitely for free slots in the
		 * queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
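
/*
 * A minimal usage sketch of qi_submit_sync(): callers fill in a
 * struct qi_desc and submit it synchronously.  qi_global_iec() below
 * is the simplest real caller and does exactly this:
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_IEC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 */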

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* GSTS is a 32-bit register; use readl, not a 64-bit read */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	/* zeroed so that every slot starts out QI_FREE */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
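
/*
 * Note: dmar_enable_qi() is idempotent: a unit whose qi is already
 * allocated returns 0, and hardware without QI support (ecap_qis)
 * gets -ENOENT.  In this era of the tree it is called from the
 * DMA-remapping and interrupt-remapping init paths (e.g. intel-iommu.c
 * and intr_remapping.c).
 */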

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	/* valid INTR_REMAP reasons are 0x20 .. 0x20 + ARRAY_SIZE - 1;
	 * the bound must be exclusive to avoid reading past the array */
	if (fault_reason >= 0x20 && (fault_reason <
			0x20 + ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
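/*
 * dmar_fault - fault-reporting interrupt handler
 *
 * Walks the primary fault recording registers starting at the index
 * named in the fault status register, logs and clears each recorded
 * fault via dmar_fault_do_one(), then clears the remaining status bits.
 */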
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

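/*
 * dmar_set_interrupt - allocate and wire up the fault-reporting IRQ
 *
 * Allocates an interrupt vector, binds it to @iommu and installs
 * dmar_fault() as the handler.  Safe to call more than once: a unit
 * that already has an irq assigned is left alone.
 */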
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}