/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/* No locks are needed, as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

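/*
 * Resolve one ACPI device-scope entry to a pci_dev: start at the bus
 * number named in the entry and follow each (device, function) path
 * element down through the subordinate buses of intervening bridges.
 */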
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev) {
			pci_dev_put(pdev);
			/* Drop the stale pointer too, so an early exit from
			 * this loop cannot hand back a device whose
			 * reference was just released. */
			pdev = NULL;
		}
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

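/*
 * Two-pass parse of a device-scope list: first count the ENDPOINT and
 * BRIDGE entries so the pci_dev array can be sized, then walk the list
 * again and resolve each entry via dmar_parse_one_dev_scope().
 */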
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

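/*
 * Returns 1 if @dev is covered by an ATSR unit for its PCI segment,
 * i.e. it sits below a root port listed in that unit's device scope
 * (or the unit is INCLUDE_ALL), and 0 otherwise.
 */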
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
	     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
	     (unsigned long long)rhsa->base_address,
	     dmi_get_system_info(DMI_BIOS_VENDOR),
	     dmi_get_system_info(DMI_BIOS_VERSION),
	     dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

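/*
 * Layout note: the DMAR table is a fixed acpi_table_dmar header
 * followed by a sequence of variable-length remapping structures
 * (DRHD, RMRR, ATSR, RHSA, ...), each starting with a common
 * acpi_dmar_header that carries its type and length.
 */
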
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping may have used the
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * that SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

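/*
 * Note: dmar_register_drhd_unit() keeps INCLUDE_ALL units at the tail
 * of dmar_drhd_units, so a unit with an explicit device scope is
 * always matched before the catch-all unit of the same segment.
 */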
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static int bios_warned;

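/*
 * Sanity-check every DRHD entry before the IOMMU is used: a register
 * base of zero, or CAP/ECAP registers that read back as all ones,
 * indicate broken firmware; in that case DMA remapping is disabled
 * outright (see the "failed" path below).
 */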
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk(KERN_ERR "IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     drhd->address,
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we disable DMA-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

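/*
 * Map and probe one remapping unit. The initial VTD_PAGE_SIZE mapping
 * is enough to read CAP/ECAP; if those registers report a register set
 * extending beyond one page, the region is remapped at full size.
 */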
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		if (!bios_warned) {
			WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		if (!bios_warned) {
			/* Promote an attitude of violence to a BIOS engineer today */
			WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     drhd->reg_base_addr,
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}
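
/*
 * Queued invalidation (QI) support. The hardware consumes a ring of
 * QI_LENGTH 16-byte descriptors (struct qi_desc is two u64 words);
 * every submission enqueues the real descriptor followed by a wait
 * descriptor whose status word is polled for completion.
 */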

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

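/*
 * Illustrative sketch of a caller: build a descriptor on the stack and
 * submit it synchronously (this mirrors what qi_global_iec() below
 * does for the interrupt entry cache):
 *
 *	struct qi_desc desc = { .low = QI_IEC_TYPE, .high = 0 };
 *	qi_submit_sync(&desc, iommu);
 */
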
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

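/*
 * For device-IOTLB invalidation, a non-zero @mask requests a flush of
 * 2^mask pages: the low (VTD_PAGE_SHIFT + mask - 1) address bits are
 * filled with ones to encode the size, and the size bit is set in the
 * descriptor's high word.
 */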
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)

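/*
 * Hardware fault reason codes 0x20 and above belong to interrupt
 * remapping; smaller codes index the DMA remapping table above.
 */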
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

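/*
 * Each primary fault record is 16 bytes: the low 64 bits hold the
 * faulting page address, the 32 bits at offset 8 the source-id, and
 * the 32 bits at offset 12 the fault/type/reason flags decoded below.
 */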
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	return dmar->flags & 0x1;
}