/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS by the BIOS via the DMA remapping reporting (DMAR)
 * ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

#undef PREFIX
#define PREFIX "DMAR:"

/*
 * No locks are needed: the DMA remapping hardware unit list is
 * constructed at boot time, and hotplug of these units is not
 * supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL units at the tail, so a scan of the list will
	 * find them at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

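/*
 * Walk one ACPI device-scope entry: follow its PCI path entry by entry
 * down the bridge hierarchy and return the pci_dev it names, or NULL if
 * the BIOS described a device that does not exist.
 */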
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

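/*
 * Count the endpoint/bridge entries in a device-scope list, allocate a
 * pci_dev pointer array of that size, and resolve each entry with
 * dmar_parse_one_dev_scope().
 */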
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

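/*
 * Second-stage parsing for a DRHD: resolve its device scope into pci_dev
 * pointers.  INCLUDE_ALL units have no explicit scope, so there is
 * nothing to do for them.
 */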
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

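/*
 * Parse one Reserved Memory Region Reporting (RMRR) structure and record
 * the reserved address range; its device scope is resolved later by
 * rmrr_parse_dev().
 */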
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
#endif

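/*
 * Pretty-print one DMAR structure (DRHD or RMRR) as the table is parsed.
 */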
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
		       "DRHD (flags: 0x%08x) base: 0x%016Lx\n",
		       drhd->flags, (unsigned long long)drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;

		printk(KERN_INFO PREFIX
		       "RMRR base: 0x%016Lx end: 0x%016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping may have used an
	 * early, fixed mapping that is no longer valid.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
			       "Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

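/*
 * Return 1 if @dev, or any PCI bridge upstream of it, appears in the
 * devices[] array of a device scope.
 */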
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

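/*
 * Find the DRHD unit whose device scope covers @dev.  Units with an
 * explicit scope are checked first; an INCLUDE_ALL unit in the same PCI
 * segment acts as the catch-all, since it is kept at the tail of the list.
 */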
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

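/*
 * Resolve the device scope of every DRHD (and, with CONFIG_DMAR, every
 * RMRR) discovered by parse_dmar_table() into pci_dev pointers.  This has
 * to run after PCI enumeration so the scope entries can be matched to
 * real devices.
 */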
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

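/*
 * Parse the DMAR ACPI table and register the DRHD/RMRR units it describes.
 * Safe to call more than once; only the first call does any work.
 */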
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}

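/*
 * Early detection of VT-d hardware: map the DMAR table just long enough
 * to decide whether DMA remapping hardware is present, then unmap it
 * again; parse_dmar_table() re-maps it later.
 */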
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

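/*
 * Map the register set of one DMAR hardware unit, read its capability
 * and extended-capability registers, and hang the resulting intel_iommu
 * structure off the DRHD entry.  Called for each DRHD found in the table.
 */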
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		 (unsigned long long)drhd->reg_base_addr,
		 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		 (unsigned long long)iommu->cap,
		 (unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	/* don't leak the register mapping on the agaw error paths */
	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

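/*
 * Check whether the hardware reported an Invalidation Queue Error (IQE)
 * for the descriptor at @index.  If so, overwrite the bad descriptor with
 * the following wait descriptor so the queue can make progress, clear the
 * fault, and return -EINVAL so the caller can bail out.
 */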
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> 4) == index) {
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc = 0;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This
		 * avoids a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			goto out;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}
out:
	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

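/*
 * Queue a context-cache invalidation of the given granularity.  A
 * non-present-entry flush is only needed when caching mode is set; when
 * it is not, the request is skipped entirely.
 */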
int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)
{
	struct qi_desc desc;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

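/*
 * Queue an IOTLB invalidation for domain @did covering 2^size_order pages
 * at @addr, honouring the unit's read/write draining capabilities.
 */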
int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	return qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;

	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 cmd, sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	cmd = iommu->gcmd | DMA_GCMD_QIE;
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
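
/*
 * Fault-event interrupt handler: walk the primary fault recording
 * registers, log each pending fault via dmar_fault_do_one() and clear it,
 * then clear any remaining fault status bits.
 */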
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

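/*
 * Allocate an irq for this IOMMU's fault-event interrupt, wire it up as
 * an MSI via arch_setup_dmar_msi() and install dmar_fault() as the handler.
 */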
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}