/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

#undef PREFIX
#define PREFIX "DMAR:"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

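/*
 * dmar_parse_one_dev_scope - resolve one ACPI device scope entry to a
 * pci_dev by walking the (bus, path) described in the DMAR table.  On
 * success *dev holds a reference to the device, or NULL if the BIOS
 * described a device that does not exist.
 */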
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

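/*
 * dmar_parse_dev_scope - walk all device scope entries between start and
 * end, count the endpoint/bridge entries, allocate a pci_dev array and
 * resolve each entry with dmar_parse_one_dev_scope().
 */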
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

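/*
 * dmar_parse_dev - late (post PCI-enumeration) parsing of the device scope
 * of one DRHD unit.  INCLUDE_ALL units carry no explicit scope; units whose
 * scope cannot be resolved are removed from the list and freed.
 */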
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

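/*
 * dmar_parse_one_rmrr - record one Reserved Memory Region Reporting (RMRR)
 * structure: the memory range the BIOS reports as reserved for the devices
 * in its scope.
 */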
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

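/*
 * dmar_parse_one_atsr - record one Root Port ATS Capability Reporting (ATSR)
 * structure, which lists the root ports (or ALL_PORTS) under which devices
 * support Address Translation Services.
 */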
static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

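/*
 * dmar_find_matched_atsr_unit - return 1 if the root port above @dev is
 * listed in an ATSR unit on the same segment (or the segment has an
 * ALL_PORTS ATSR), 0 otherwise.
 */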
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

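/*
 * dmar_table_print_dmar_entry - log the salient fields of one DMAR
 * remapping structure (DRHD, RMRR or ATSR) as it is parsed.
 */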
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do the detection again; the earlier dmar_tbl mapping could have
	 * been made with the early fixed map.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

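/*
 * dmar_pci_device_match - return 1 if @dev or any of its parent bridges is
 * present in the @devices array, 0 otherwise.
 */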
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

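/*
 * dmar_find_matched_drhd_unit - find the DRHD unit whose device scope (or
 * INCLUDE_ALL flag, for the matching segment) covers @dev.
 */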
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

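/*
 * dmar_dev_scope_init - resolve the device scopes of all DRHD, RMRR and
 * ATSR units once the PCI subsystem is up and devices can be looked up.
 */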
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

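/*
 * dmar_table_init - parse the DMAR table exactly once and report what was
 * found.  Returns -ENODEV if the table is missing or lists no DRHD units.
 */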
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}

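/*
 * detect_intel_iommu - early detection of a DMAR table, used to flag that
 * an IOMMU is present before the DMA code decides on swiotlb.  The early
 * table mapping is dropped again; parse_dmar_table() remaps it later.
 */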
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

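/*
 * alloc_iommu - allocate and initialize a struct intel_iommu for one DRHD:
 * map its register range, cache the capability/extended-capability
 * registers and compute the supported/maximum address widths (agaw/msagaw).
 */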
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		 (unsigned long long)drhd->reg_base_addr,
		 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		 (unsigned long long)iommu->cap,
		 (unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

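/*
 * qi_check_fault - check for an Invalidation Queue Error (IQE) while
 * waiting for the descriptor at @index.  If the head points at our
 * descriptor, overwrite it with the harmless wait descriptor, clear the
 * fault and return -EINVAL so the caller can bail out.
 */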
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> 4) == index) {
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc = 0;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			goto out;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}
out:
	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

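/*
 * qi_flush_context - queue a context-cache invalidation descriptor for the
 * given domain/source-id/function-mask and granularity, and wait for it.
 */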
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

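/*
 * qi_flush_iotlb - queue an IOTLB invalidation descriptor for @did covering
 * 2^size_order pages at @addr, setting the read/write drain hints when the
 * capability register advertises them, and wait for completion.
 */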
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX 	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

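/*
 * dmar_fault_do_one - decode and log a single primary fault record:
 * requester id, faulting address/index, and the textual fault reason.
 */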
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
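/*
 * dmar_fault - fault-event interrupt handler.  Walks the primary fault
 * recording registers starting at the index reported in the fault status
 * register, logs and clears each pending record, then clears the remaining
 * status bits.
 */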
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

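/*
 * dmar_set_interrupt - allocate an irq for the fault-event interrupt of
 * @iommu, hook up its MSI-like registers via arch_setup_dmar_msi() and
 * register dmar_fault() as the handler.
 */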
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}