blob: 3a74e4410fc0737d47e1f294378fdb7dc07e27ad [file] [log] [blame]
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -07001/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
mark gross98bcef52008-02-23 15:23:35 -080017 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070021 *
Suresh Siddhae61d98d2008-07-10 11:16:35 -070022 * This file implements early detection/parsing of Remapping Devices
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070023 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
Suresh Siddhae61d98d2008-07-10 11:16:35 -070025 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070027 */
28
29#include <linux/pci.h>
30#include <linux/dmar.h>
Kay, Allen M38717942008-09-09 18:37:29 +030031#include <linux/iova.h>
32#include <linux/intel-iommu.h>
Suresh Siddhafe962e92008-07-10 11:16:42 -070033#include <linux/timer.h>
Suresh Siddha0ac24912009-03-16 17:04:54 -070034#include <linux/irq.h>
35#include <linux/interrupt.h>
Shane Wang69575d32009-09-01 18:25:07 -070036#include <linux/tboot.h>
Len Browneb27cae2009-07-06 23:40:19 -040037#include <linux/dmi.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090038#include <linux/slab.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070039#include <asm/irq_remapping.h>
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -040040#include <asm/iommu_table.h>
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070041
Len Browna192a952009-07-28 16:45:54 -040042#define PREFIX "DMAR: "
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070043
44/* No locks are needed as DMA remapping hardware unit
45 * list is constructed at boot time and hotplug of
46 * these units are not supported by the architecture.
47 */
48LIST_HEAD(dmar_drhd_units);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070049
Suresh Siddha41750d32011-08-23 17:05:18 -070050struct acpi_table_header * __initdata dmar_tbl;
Yinghai Lu8e1568f2009-02-11 01:06:59 -080051static acpi_size dmar_tbl_size;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070052
53static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
54{
55 /*
56 * add INCLUDE_ALL at the tail, so scan the list will find it at
57 * the very end.
58 */
59 if (drhd->include_all)
60 list_add_tail(&drhd->list, &dmar_drhd_units);
61 else
62 list_add(&drhd->list, &dmar_drhd_units);
63}
64
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070065static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
66 struct pci_dev **dev, u16 segment)
67{
68 struct pci_bus *bus;
69 struct pci_dev *pdev = NULL;
70 struct acpi_dmar_pci_path *path;
71 int count;
72
73 bus = pci_find_bus(segment, scope->bus);
74 path = (struct acpi_dmar_pci_path *)(scope + 1);
75 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
76 / sizeof(struct acpi_dmar_pci_path);
77
78 while (count) {
79 if (pdev)
80 pci_dev_put(pdev);
81 /*
82 * Some BIOSes list non-exist devices in DMAR table, just
83 * ignore it
84 */
85 if (!bus) {
86 printk(KERN_WARNING
87 PREFIX "Device scope bus [%d] not found\n",
88 scope->bus);
89 break;
90 }
91 pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
92 if (!pdev) {
93 printk(KERN_WARNING PREFIX
94 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
95 segment, bus->number, path->dev, path->fn);
96 break;
97 }
98 path ++;
99 count --;
100 bus = pdev->subordinate;
101 }
102 if (!pdev) {
103 printk(KERN_WARNING PREFIX
104 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
105 segment, scope->bus, path->dev, path->fn);
106 *dev = NULL;
107 return 0;
108 }
109 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
110 pdev->subordinate) || (scope->entry_type == \
111 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
112 pci_dev_put(pdev);
113 printk(KERN_WARNING PREFIX
114 "Device scope type does not match for %s\n",
115 pci_name(pdev));
116 return -EINVAL;
117 }
118 *dev = pdev;
119 return 0;
120}
121
Suresh Siddha318fe7d2011-08-23 17:05:20 -0700122int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
123 struct pci_dev ***devices, u16 segment)
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700124{
125 struct acpi_dmar_device_scope *scope;
126 void * tmp = start;
127 int index;
128 int ret;
129
130 *cnt = 0;
131 while (start < end) {
132 scope = start;
133 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
134 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
135 (*cnt)++;
Yinghai Lu5715f0f2010-04-08 19:58:22 +0100136 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700137 printk(KERN_WARNING PREFIX
Yinghai Lu5715f0f2010-04-08 19:58:22 +0100138 "Unsupported device scope\n");
139 }
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700140 start += scope->length;
141 }
142 if (*cnt == 0)
143 return 0;
144
145 *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
146 if (!*devices)
147 return -ENOMEM;
148
149 start = tmp;
150 index = 0;
151 while (start < end) {
152 scope = start;
153 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
154 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
155 ret = dmar_parse_one_dev_scope(scope,
156 &(*devices)[index], segment);
157 if (ret) {
158 kfree(*devices);
159 return ret;
160 }
161 index ++;
162 }
163 start += scope->length;
164 }
165
166 return 0;
167}
168
169/**
170 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
171 * structure which uniquely represent one DMA remapping hardware unit
172 * present in the platform
173 */
174static int __init
175dmar_parse_one_drhd(struct acpi_dmar_header *header)
176{
177 struct acpi_dmar_hardware_unit *drhd;
178 struct dmar_drhd_unit *dmaru;
179 int ret = 0;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700180
David Woodhousee523b382009-04-10 22:27:48 -0700181 drhd = (struct acpi_dmar_hardware_unit *)header;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700182 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
183 if (!dmaru)
184 return -ENOMEM;
185
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700186 dmaru->hdr = header;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700187 dmaru->reg_base_addr = drhd->address;
David Woodhouse276dbf992009-04-04 01:45:37 +0100188 dmaru->segment = drhd->segment;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700189 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
190
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700191 ret = alloc_iommu(dmaru);
192 if (ret) {
193 kfree(dmaru);
194 return ret;
195 }
196 dmar_register_drhd_unit(dmaru);
197 return 0;
198}
199
David Woodhousef82851a2008-10-18 15:43:14 +0100200static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700201{
202 struct acpi_dmar_hardware_unit *drhd;
David Woodhousef82851a2008-10-18 15:43:14 +0100203 int ret = 0;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700204
205 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
206
Yu Zhao2e824f72008-12-22 16:54:58 +0800207 if (dmaru->include_all)
208 return 0;
209
210 ret = dmar_parse_dev_scope((void *)(drhd + 1),
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700211 ((void *)drhd) + drhd->header.length,
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700212 &dmaru->devices_cnt, &dmaru->devices,
213 drhd->segment);
Suresh Siddha1c7d1bc2008-09-03 16:58:35 -0700214 if (ret) {
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700215 list_del(&dmaru->list);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700216 kfree(dmaru);
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700217 }
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700218 return ret;
219}
220
David Woodhouseaa697072009-10-07 12:18:00 +0100221#ifdef CONFIG_ACPI_NUMA
Suresh Siddhaee34b322009-10-02 11:01:21 -0700222static int __init
223dmar_parse_one_rhsa(struct acpi_dmar_header *header)
224{
225 struct acpi_dmar_rhsa *rhsa;
226 struct dmar_drhd_unit *drhd;
227
228 rhsa = (struct acpi_dmar_rhsa *)header;
David Woodhouseaa697072009-10-07 12:18:00 +0100229 for_each_drhd_unit(drhd) {
Suresh Siddhaee34b322009-10-02 11:01:21 -0700230 if (drhd->reg_base_addr == rhsa->base_address) {
231 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
232
233 if (!node_online(node))
234 node = -1;
235 drhd->iommu->node = node;
David Woodhouseaa697072009-10-07 12:18:00 +0100236 return 0;
237 }
Suresh Siddhaee34b322009-10-02 11:01:21 -0700238 }
Ben Hutchingsfd0c8892010-04-03 19:38:43 +0100239 WARN_TAINT(
240 1, TAINT_FIRMWARE_WORKAROUND,
241 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
242 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
243 drhd->reg_base_addr,
244 dmi_get_system_info(DMI_BIOS_VENDOR),
245 dmi_get_system_info(DMI_BIOS_VERSION),
246 dmi_get_system_info(DMI_PRODUCT_VERSION));
Suresh Siddhaee34b322009-10-02 11:01:21 -0700247
David Woodhouseaa697072009-10-07 12:18:00 +0100248 return 0;
Suresh Siddhaee34b322009-10-02 11:01:21 -0700249}
David Woodhouseaa697072009-10-07 12:18:00 +0100250#endif
Suresh Siddhaee34b322009-10-02 11:01:21 -0700251
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700252static void __init
253dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
254{
255 struct acpi_dmar_hardware_unit *drhd;
256 struct acpi_dmar_reserved_memory *rmrr;
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800257 struct acpi_dmar_atsr *atsr;
Roland Dreier17b60972009-09-24 12:14:00 -0700258 struct acpi_dmar_rhsa *rhsa;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700259
260 switch (header->type) {
261 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800262 drhd = container_of(header, struct acpi_dmar_hardware_unit,
263 header);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700264 printk (KERN_INFO PREFIX
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800265 "DRHD base: %#016Lx flags: %#x\n",
266 (unsigned long long)drhd->address, drhd->flags);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700267 break;
268 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800269 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
270 header);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700271 printk (KERN_INFO PREFIX
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800272 "RMRR base: %#016Lx end: %#016Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700273 (unsigned long long)rmrr->base_address,
274 (unsigned long long)rmrr->end_address);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700275 break;
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800276 case ACPI_DMAR_TYPE_ATSR:
277 atsr = container_of(header, struct acpi_dmar_atsr, header);
278 printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
279 break;
Roland Dreier17b60972009-09-24 12:14:00 -0700280 case ACPI_DMAR_HARDWARE_AFFINITY:
281 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
282 printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
283 (unsigned long long)rhsa->base_address,
284 rhsa->proximity_domain);
285 break;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700286 }
287}
288
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700289/**
290 * dmar_table_detect - checks to see if the platform supports DMAR devices
291 */
292static int __init dmar_table_detect(void)
293{
294 acpi_status status = AE_OK;
295
296 /* if we could find DMAR table, then there are DMAR devices */
Yinghai Lu8e1568f2009-02-11 01:06:59 -0800297 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
298 (struct acpi_table_header **)&dmar_tbl,
299 &dmar_tbl_size);
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700300
301 if (ACPI_SUCCESS(status) && !dmar_tbl) {
302 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
303 status = AE_NOT_FOUND;
304 }
305
306 return (ACPI_SUCCESS(status) ? 1 : 0);
307}
Suresh Siddhaaaa9d1d2008-07-10 11:16:38 -0700308
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700309/**
310 * parse_dmar_table - parses the DMA reporting table
311 */
312static int __init
313parse_dmar_table(void)
314{
315 struct acpi_table_dmar *dmar;
316 struct acpi_dmar_header *entry_header;
317 int ret = 0;
318
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700319 /*
320 * Do it again, earlier dmar_tbl mapping could be mapped with
321 * fixed map.
322 */
323 dmar_table_detect();
324
Joseph Cihulaa59b50e2009-06-30 19:31:10 -0700325 /*
326 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
327 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
328 */
329 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
330
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700331 dmar = (struct acpi_table_dmar *)dmar_tbl;
332 if (!dmar)
333 return -ENODEV;
334
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700335 if (dmar->width < PAGE_SHIFT - 1) {
Fenghua Yu093f87d2007-11-21 15:07:14 -0800336 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700337 return -EINVAL;
338 }
339
340 printk (KERN_INFO PREFIX "Host address width %d\n",
341 dmar->width + 1);
342
343 entry_header = (struct acpi_dmar_header *)(dmar + 1);
344 while (((unsigned long)entry_header) <
345 (((unsigned long)dmar) + dmar_tbl->length)) {
Tony Battersby084eb962009-02-11 13:24:19 -0800346 /* Avoid looping forever on bad ACPI tables */
347 if (entry_header->length == 0) {
348 printk(KERN_WARNING PREFIX
349 "Invalid 0-length structure\n");
350 ret = -EINVAL;
351 break;
352 }
353
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700354 dmar_table_print_dmar_entry(entry_header);
355
356 switch (entry_header->type) {
357 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
358 ret = dmar_parse_one_drhd(entry_header);
359 break;
360 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
361 ret = dmar_parse_one_rmrr(entry_header);
362 break;
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800363 case ACPI_DMAR_TYPE_ATSR:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800364 ret = dmar_parse_one_atsr(entry_header);
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800365 break;
Roland Dreier17b60972009-09-24 12:14:00 -0700366 case ACPI_DMAR_HARDWARE_AFFINITY:
David Woodhouseaa697072009-10-07 12:18:00 +0100367#ifdef CONFIG_ACPI_NUMA
Suresh Siddhaee34b322009-10-02 11:01:21 -0700368 ret = dmar_parse_one_rhsa(entry_header);
David Woodhouseaa697072009-10-07 12:18:00 +0100369#endif
Roland Dreier17b60972009-09-24 12:14:00 -0700370 break;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700371 default:
372 printk(KERN_WARNING PREFIX
Roland Dreier4de75cf2009-09-24 01:01:29 +0100373 "Unknown DMAR structure type %d\n",
374 entry_header->type);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700375 ret = 0; /* for forward compatibility */
376 break;
377 }
378 if (ret)
379 break;
380
381 entry_header = ((void *)entry_header + entry_header->length);
382 }
383 return ret;
384}
385
Yinghaidda56542010-04-09 01:07:55 +0100386static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700387 struct pci_dev *dev)
388{
389 int index;
390
391 while (dev) {
392 for (index = 0; index < cnt; index++)
393 if (dev == devices[index])
394 return 1;
395
396 /* Check our parent */
397 dev = dev->bus->self;
398 }
399
400 return 0;
401}
402
403struct dmar_drhd_unit *
404dmar_find_matched_drhd_unit(struct pci_dev *dev)
405{
Yu Zhao2e824f72008-12-22 16:54:58 +0800406 struct dmar_drhd_unit *dmaru = NULL;
407 struct acpi_dmar_hardware_unit *drhd;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700408
Yinghaidda56542010-04-09 01:07:55 +0100409 dev = pci_physfn(dev);
410
Yu Zhao2e824f72008-12-22 16:54:58 +0800411 list_for_each_entry(dmaru, &dmar_drhd_units, list) {
412 drhd = container_of(dmaru->hdr,
413 struct acpi_dmar_hardware_unit,
414 header);
415
416 if (dmaru->include_all &&
417 drhd->segment == pci_domain_nr(dev->bus))
418 return dmaru;
419
420 if (dmar_pci_device_match(dmaru->devices,
421 dmaru->devices_cnt, dev))
422 return dmaru;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700423 }
424
425 return NULL;
426}
427
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700428int __init dmar_dev_scope_init(void)
429{
Suresh Siddhac2c72862011-08-23 17:05:19 -0700430 static int dmar_dev_scope_initialized;
Suresh Siddha04e2ea62008-09-03 16:58:34 -0700431 struct dmar_drhd_unit *drhd, *drhd_n;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700432 int ret = -ENODEV;
433
Suresh Siddhac2c72862011-08-23 17:05:19 -0700434 if (dmar_dev_scope_initialized)
435 return dmar_dev_scope_initialized;
436
Suresh Siddha318fe7d2011-08-23 17:05:20 -0700437 if (list_empty(&dmar_drhd_units))
438 goto fail;
439
Suresh Siddha04e2ea62008-09-03 16:58:34 -0700440 list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700441 ret = dmar_parse_dev(drhd);
442 if (ret)
Suresh Siddhac2c72862011-08-23 17:05:19 -0700443 goto fail;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700444 }
445
Suresh Siddha318fe7d2011-08-23 17:05:20 -0700446 ret = dmar_parse_rmrr_atsr_dev();
447 if (ret)
448 goto fail;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700449
Suresh Siddhac2c72862011-08-23 17:05:19 -0700450 dmar_dev_scope_initialized = 1;
451 return 0;
452
453fail:
454 dmar_dev_scope_initialized = ret;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700455 return ret;
456}
457
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700458
/*
 * dmar_table_init - parse the DMAR table and build the DRHD unit list.
 *
 * Idempotent: only the first call does the work; later calls return 0
 * unconditionally (even if the first attempt failed — the flag is set
 * before parsing).  Returns 0 on success, -ENODEV when there is no DMAR
 * table or no DRHD units, or the parse error.
 */
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		/* -ENODEV simply means "no VT-d here"; stay quiet. */
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}
483
Ben Hutchings3a8663e2010-04-03 19:37:23 +0100484static void warn_invalid_dmar(u64 addr, const char *message)
485{
Ben Hutchingsfd0c8892010-04-03 19:38:43 +0100486 WARN_TAINT_ONCE(
487 1, TAINT_FIRMWARE_WORKAROUND,
488 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
489 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
490 addr, message,
491 dmi_get_system_info(DMI_BIOS_VENDOR),
492 dmi_get_system_info(DMI_BIOS_VERSION),
493 dmi_get_system_info(DMI_PRODUCT_VERSION));
Ben Hutchings3a8663e2010-04-03 19:37:23 +0100494}
David Woodhouse6ecbf012009-12-02 09:20:27 +0000495
David Woodhouse86cf8982009-11-09 22:15:15 +0000496int __init check_zero_address(void)
497{
498 struct acpi_table_dmar *dmar;
499 struct acpi_dmar_header *entry_header;
500 struct acpi_dmar_hardware_unit *drhd;
501
502 dmar = (struct acpi_table_dmar *)dmar_tbl;
503 entry_header = (struct acpi_dmar_header *)(dmar + 1);
504
505 while (((unsigned long)entry_header) <
506 (((unsigned long)dmar) + dmar_tbl->length)) {
507 /* Avoid looping forever on bad ACPI tables */
508 if (entry_header->length == 0) {
509 printk(KERN_WARNING PREFIX
510 "Invalid 0-length structure\n");
511 return 0;
512 }
513
514 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
Chris Wright2c992202009-12-02 09:17:13 +0000515 void __iomem *addr;
516 u64 cap, ecap;
517
David Woodhouse86cf8982009-11-09 22:15:15 +0000518 drhd = (void *)entry_header;
519 if (!drhd->address) {
Ben Hutchings3a8663e2010-04-03 19:37:23 +0100520 warn_invalid_dmar(0, "");
Chris Wright2c992202009-12-02 09:17:13 +0000521 goto failed;
David Woodhouse86cf8982009-11-09 22:15:15 +0000522 }
Chris Wright2c992202009-12-02 09:17:13 +0000523
524 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
525 if (!addr ) {
526 printk("IOMMU: can't validate: %llx\n", drhd->address);
527 goto failed;
528 }
529 cap = dmar_readq(addr + DMAR_CAP_REG);
530 ecap = dmar_readq(addr + DMAR_ECAP_REG);
531 early_iounmap(addr, VTD_PAGE_SIZE);
532 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
Ben Hutchings3a8663e2010-04-03 19:37:23 +0100533 warn_invalid_dmar(drhd->address,
534 " returns all ones");
Chris Wright2c992202009-12-02 09:17:13 +0000535 goto failed;
536 }
David Woodhouse86cf8982009-11-09 22:15:15 +0000537 }
538
539 entry_header = ((void *)entry_header + entry_header->length);
540 }
541 return 1;
Chris Wright2c992202009-12-02 09:17:13 +0000542
543failed:
Chris Wright2c992202009-12-02 09:17:13 +0000544 return 0;
David Woodhouse86cf8982009-11-09 22:15:15 +0000545}
546
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -0400547int __init detect_intel_iommu(void)
Suresh Siddha2ae21012008-07-10 11:16:43 -0700548{
549 int ret;
550
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700551 ret = dmar_table_detect();
David Woodhouse86cf8982009-11-09 22:15:15 +0000552 if (ret)
553 ret = check_zero_address();
Suresh Siddha2ae21012008-07-10 11:16:43 -0700554 {
Suresh Siddha1cb11582008-07-10 11:16:51 -0700555 struct acpi_table_dmar *dmar;
Jan Kiszkab3a530e2011-05-15 12:34:55 +0200556
Suresh Siddha1cb11582008-07-10 11:16:51 -0700557 dmar = (struct acpi_table_dmar *) dmar_tbl;
Suresh Siddhaf5d1b972011-08-23 17:05:22 -0700558
Suresh Siddha95a02e92012-03-30 11:47:07 -0700559 if (ret && irq_remapping_enabled && cpu_has_x2apic &&
Suresh Siddhaf5d1b972011-08-23 17:05:22 -0700560 dmar->flags & 0x1)
Suresh Siddha1cb11582008-07-10 11:16:51 -0700561 printk(KERN_INFO
Suresh Siddhaf5d1b972011-08-23 17:05:22 -0700562 "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
563
Linus Torvalds11bd04f2009-12-11 12:18:16 -0800564 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
Suresh Siddha2ae21012008-07-10 11:16:43 -0700565 iommu_detected = 1;
Chris Wright5d990b62009-12-04 12:15:21 -0800566 /* Make sure ACS will be enabled */
567 pci_request_acs();
568 }
Suresh Siddhaf5d1b972011-08-23 17:05:22 -0700569
FUJITA Tomonori9d5ce732009-11-10 19:46:16 +0900570#ifdef CONFIG_X86
571 if (ret)
572 x86_init.iommu.iommu_init = intel_iommu_init;
573#endif
Youquan Songcacd4212008-10-16 16:31:57 -0700574 }
Yinghai Lu8e1568f2009-02-11 01:06:59 -0800575 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700576 dmar_tbl = NULL;
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -0400577
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -0400578 return ret ? 1 : -ENODEV;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700579}
580
581
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700582int alloc_iommu(struct dmar_drhd_unit *drhd)
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700583{
Suresh Siddhac42d9f32008-07-10 11:16:36 -0700584 struct intel_iommu *iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700585 int map_size;
586 u32 ver;
Suresh Siddhac42d9f32008-07-10 11:16:36 -0700587 static int iommu_allocated = 0;
Joerg Roedel43f73922009-01-03 23:56:27 +0100588 int agaw = 0;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700589 int msagaw = 0;
Suresh Siddhac42d9f32008-07-10 11:16:36 -0700590
David Woodhouse6ecbf012009-12-02 09:20:27 +0000591 if (!drhd->reg_base_addr) {
Ben Hutchings3a8663e2010-04-03 19:37:23 +0100592 warn_invalid_dmar(0, "");
David Woodhouse6ecbf012009-12-02 09:20:27 +0000593 return -EINVAL;
594 }
595
Suresh Siddhac42d9f32008-07-10 11:16:36 -0700596 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
597 if (!iommu)
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700598 return -ENOMEM;
Suresh Siddhac42d9f32008-07-10 11:16:36 -0700599
600 iommu->seq_id = iommu_allocated++;
Suresh Siddha9d783ba2009-03-16 17:04:55 -0700601 sprintf (iommu->name, "dmar%d", iommu->seq_id);
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700602
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700603 iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700604 if (!iommu->reg) {
605 printk(KERN_ERR "IOMMU: can't map the region\n");
606 goto error;
607 }
608 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
609 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
610
David Woodhouse08155652009-08-04 09:17:20 +0100611 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
Ben Hutchings3a8663e2010-04-03 19:37:23 +0100612 warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
David Woodhouse08155652009-08-04 09:17:20 +0100613 goto err_unmap;
614 }
615
Weidong Han1b573682008-12-08 15:34:06 +0800616 agaw = iommu_calculate_agaw(iommu);
617 if (agaw < 0) {
618 printk(KERN_ERR
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700619 "Cannot get a valid agaw for iommu (seq_id = %d)\n",
620 iommu->seq_id);
David Woodhouse08155652009-08-04 09:17:20 +0100621 goto err_unmap;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700622 }
623 msagaw = iommu_calculate_max_sagaw(iommu);
624 if (msagaw < 0) {
625 printk(KERN_ERR
626 "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
Weidong Han1b573682008-12-08 15:34:06 +0800627 iommu->seq_id);
David Woodhouse08155652009-08-04 09:17:20 +0100628 goto err_unmap;
Weidong Han1b573682008-12-08 15:34:06 +0800629 }
630 iommu->agaw = agaw;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700631 iommu->msagaw = msagaw;
Weidong Han1b573682008-12-08 15:34:06 +0800632
Suresh Siddhaee34b322009-10-02 11:01:21 -0700633 iommu->node = -1;
634
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700635 /* the registers might be more than one page */
636 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
637 cap_max_fault_reg_offset(iommu->cap));
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700638 map_size = VTD_PAGE_ALIGN(map_size);
639 if (map_size > VTD_PAGE_SIZE) {
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700640 iounmap(iommu->reg);
641 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
642 if (!iommu->reg) {
643 printk(KERN_ERR "IOMMU: can't map the region\n");
644 goto error;
645 }
646 }
647
648 ver = readl(iommu->reg + DMAR_VER_REG);
Yinghai Lu680a7522010-04-08 19:58:23 +0100649 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
650 iommu->seq_id,
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700651 (unsigned long long)drhd->reg_base_addr,
652 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
653 (unsigned long long)iommu->cap,
654 (unsigned long long)iommu->ecap);
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700655
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200656 raw_spin_lock_init(&iommu->register_lock);
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700657
658 drhd->iommu = iommu;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700659 return 0;
David Woodhouse08155652009-08-04 09:17:20 +0100660
661 err_unmap:
662 iounmap(iommu->reg);
663 error:
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700664 kfree(iommu);
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700665 return -1;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700666}
667
668void free_iommu(struct intel_iommu *iommu)
669{
670 if (!iommu)
671 return;
672
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700673 free_dmar_iommu(iommu);
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700674
675 if (iommu->reg)
676 iounmap(iommu->reg);
677 kfree(iommu);
678}
Suresh Siddhafe962e92008-07-10 11:16:42 -0700679
680/*
681 * Reclaim all the submitted descriptors which have completed its work.
682 */
683static inline void reclaim_free_desc(struct q_inval *qi)
684{
Yu Zhao6ba6c3a2009-05-18 13:51:35 +0800685 while (qi->desc_status[qi->free_tail] == QI_DONE ||
686 qi->desc_status[qi->free_tail] == QI_ABORT) {
Suresh Siddhafe962e92008-07-10 11:16:42 -0700687 qi->desc_status[qi->free_tail] = QI_FREE;
688 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
689 qi->free_cnt++;
690 }
691}
692
Yu Zhao704126a2009-01-04 16:28:52 +0800693static int qi_check_fault(struct intel_iommu *iommu, int index)
694{
695 u32 fault;
Yu Zhao6ba6c3a2009-05-18 13:51:35 +0800696 int head, tail;
Yu Zhao704126a2009-01-04 16:28:52 +0800697 struct q_inval *qi = iommu->qi;
698 int wait_index = (index + 1) % QI_LENGTH;
699
Yu Zhao6ba6c3a2009-05-18 13:51:35 +0800700 if (qi->desc_status[wait_index] == QI_ABORT)
701 return -EAGAIN;
702
Yu Zhao704126a2009-01-04 16:28:52 +0800703 fault = readl(iommu->reg + DMAR_FSTS_REG);
704
705 /*
706 * If IQE happens, the head points to the descriptor associated
707 * with the error. No new descriptors are fetched until the IQE
708 * is cleared.
709 */
710 if (fault & DMA_FSTS_IQE) {
711 head = readl(iommu->reg + DMAR_IQH_REG);
Yu Zhao6ba6c3a2009-05-18 13:51:35 +0800712 if ((head >> DMAR_IQ_SHIFT) == index) {
713 printk(KERN_ERR "VT-d detected invalid descriptor: "
714 "low=%llx, high=%llx\n",
715 (unsigned long long)qi->desc[index].low,
716 (unsigned long long)qi->desc[index].high);
Yu Zhao704126a2009-01-04 16:28:52 +0800717 memcpy(&qi->desc[index], &qi->desc[wait_index],
718 sizeof(struct qi_desc));
719 __iommu_flush_cache(iommu, &qi->desc[index],
720 sizeof(struct qi_desc));
721 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
722 return -EINVAL;
723 }
724 }
725
Yu Zhao6ba6c3a2009-05-18 13:51:35 +0800726 /*
727 * If ITE happens, all pending wait_desc commands are aborted.
728 * No new descriptors are fetched until the ITE is cleared.
729 */
730 if (fault & DMA_FSTS_ITE) {
731 head = readl(iommu->reg + DMAR_IQH_REG);
732 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
733 head |= 1;
734 tail = readl(iommu->reg + DMAR_IQT_REG);
735 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
736
737 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
738
739 do {
740 if (qi->desc_status[head] == QI_IN_USE)
741 qi->desc_status[head] = QI_ABORT;
742 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
743 } while (head != tail);
744
745 if (qi->desc_status[wait_index] == QI_ABORT)
746 return -EAGAIN;
747 }
748
749 if (fault & DMA_FSTS_ICE)
750 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
751
Yu Zhao704126a2009-01-04 16:28:52 +0800752 return 0;
753}
754
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 *
 * Returns 0 on success, or the nonzero fault code reported by
 * qi_check_fault(); -EAGAIN time-outs are retried internally.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	/* Queued invalidation not set up on this unit: nothing to do. */
	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	/*
	 * Each submission consumes two slots (the request descriptor plus a
	 * wait descriptor); spin until at least that much room is free.
	 */
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	/*
	 * The wait descriptor asks hardware to write QI_DONE into the
	 * status slot once the preceding descriptor has completed.
	 */
	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context to queue another cmd while a cmd is already submitted
		 * and waiting for completion on this cpu. This is to avoid
		 * a deadlock where the interrupt context can wait indefinitely
		 * for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		/* Drop only the lock (IRQs stay off) so others can progress. */
		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	/* qi_check_fault() requests a full resubmission with -EAGAIN. */
	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
834
835/*
836 * Flush the global interrupt entry cache.
837 */
838void qi_global_iec(struct intel_iommu *iommu)
839{
840 struct qi_desc desc;
841
842 desc.low = QI_IEC_TYPE;
843 desc.high = 0;
844
Yu Zhao704126a2009-01-04 16:28:52 +0800845 /* should never fail */
Suresh Siddhafe962e92008-07-10 11:16:42 -0700846 qi_submit_sync(&desc, iommu);
847}
848
David Woodhouse4c25a2c2009-05-10 17:16:06 +0100849void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
850 u64 type)
Youquan Song3481f212008-10-16 16:31:55 -0700851{
Youquan Song3481f212008-10-16 16:31:55 -0700852 struct qi_desc desc;
853
Youquan Song3481f212008-10-16 16:31:55 -0700854 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
855 | QI_CC_GRAN(type) | QI_CC_TYPE;
856 desc.high = 0;
857
David Woodhouse4c25a2c2009-05-10 17:16:06 +0100858 qi_submit_sync(&desc, iommu);
Youquan Song3481f212008-10-16 16:31:55 -0700859}
860
David Woodhouse1f0ef2a2009-05-10 19:58:49 +0100861void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
862 unsigned int size_order, u64 type)
Youquan Song3481f212008-10-16 16:31:55 -0700863{
864 u8 dw = 0, dr = 0;
865
866 struct qi_desc desc;
867 int ih = 0;
868
Youquan Song3481f212008-10-16 16:31:55 -0700869 if (cap_write_drain(iommu->cap))
870 dw = 1;
871
872 if (cap_read_drain(iommu->cap))
873 dr = 1;
874
875 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
876 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
877 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
878 | QI_IOTLB_AM(size_order);
879
David Woodhouse1f0ef2a2009-05-10 19:58:49 +0100880 qi_submit_sync(&desc, iommu);
Youquan Song3481f212008-10-16 16:31:55 -0700881}
882
Yu Zhao6ba6c3a2009-05-18 13:51:35 +0800883void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
884 u64 addr, unsigned mask)
885{
886 struct qi_desc desc;
887
888 if (mask) {
889 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
890 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
891 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
892 } else
893 desc.high = QI_DEV_IOTLB_ADDR(addr);
894
895 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
896 qdep = 0;
897
898 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
899 QI_DIOTLB_TYPE;
900
901 qi_submit_sync(&desc, iommu);
902}
903
Suresh Siddhafe962e92008-07-10 11:16:42 -0700904/*
Suresh Siddhaeba67e52009-03-16 17:04:56 -0700905 * Disable Queued Invalidation interface.
906 */
907void dmar_disable_qi(struct intel_iommu *iommu)
908{
909 unsigned long flags;
910 u32 sts;
911 cycles_t start_time = get_cycles();
912
913 if (!ecap_qis(iommu->ecap))
914 return;
915
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200916 raw_spin_lock_irqsave(&iommu->register_lock, flags);
Suresh Siddhaeba67e52009-03-16 17:04:56 -0700917
918 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
919 if (!(sts & DMA_GSTS_QIES))
920 goto end;
921
922 /*
923 * Give a chance to HW to complete the pending invalidation requests.
924 */
925 while ((readl(iommu->reg + DMAR_IQT_REG) !=
926 readl(iommu->reg + DMAR_IQH_REG)) &&
927 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
928 cpu_relax();
929
930 iommu->gcmd &= ~DMA_GCMD_QIE;
Suresh Siddhaeba67e52009-03-16 17:04:56 -0700931 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
932
933 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
934 !(sts & DMA_GSTS_QIES), sts);
935end:
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200936 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Suresh Siddhaeba67e52009-03-16 17:04:56 -0700937}
938
/*
 * Enable queued invalidation.
 *
 * Resets the software queue state, points the hardware at the descriptor
 * page, sets QIE and waits for the hardware to report it enabled.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	/* Restart the software queue from slot 0 with everything free. */
	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	/* Tell hardware where the invalidation queue lives. */
	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
966
967/*
Suresh Siddhafe962e92008-07-10 11:16:42 -0700968 * Enable Queued Invalidation interface. This is a must to support
969 * interrupt-remapping. Also used by DMA-remapping, which replaces
970 * register based IOTLB invalidation.
971 */
972int dmar_enable_qi(struct intel_iommu *iommu)
973{
Suresh Siddhafe962e92008-07-10 11:16:42 -0700974 struct q_inval *qi;
Suresh Siddha751cafe2009-10-02 11:01:22 -0700975 struct page *desc_page;
Suresh Siddhafe962e92008-07-10 11:16:42 -0700976
977 if (!ecap_qis(iommu->ecap))
978 return -ENOENT;
979
980 /*
981 * queued invalidation is already setup and enabled.
982 */
983 if (iommu->qi)
984 return 0;
985
Suresh Siddhafa4b57c2009-03-16 17:05:05 -0700986 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
Suresh Siddhafe962e92008-07-10 11:16:42 -0700987 if (!iommu->qi)
988 return -ENOMEM;
989
990 qi = iommu->qi;
991
Suresh Siddha751cafe2009-10-02 11:01:22 -0700992
993 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
994 if (!desc_page) {
Suresh Siddhafe962e92008-07-10 11:16:42 -0700995 kfree(qi);
996 iommu->qi = 0;
997 return -ENOMEM;
998 }
999
Suresh Siddha751cafe2009-10-02 11:01:22 -07001000 qi->desc = page_address(desc_page);
1001
Suresh Siddhafa4b57c2009-03-16 17:05:05 -07001002 qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001003 if (!qi->desc_status) {
1004 free_page((unsigned long) qi->desc);
1005 kfree(qi);
1006 iommu->qi = 0;
1007 return -ENOMEM;
1008 }
1009
1010 qi->free_head = qi->free_tail = 0;
1011 qi->free_cnt = QI_LENGTH;
1012
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001013 raw_spin_lock_init(&qi->q_lock);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001014
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001015 __dmar_enable_qi(iommu);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001016
1017 return 0;
1018}
Suresh Siddha0ac24912009-03-16 17:04:54 -07001019
/* iommu interrupt handling. Most stuff are MSI-like. */

/* Which remapping facility a fault record came from. */
enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

/* DMA-remapping fault reason strings, indexed directly by reason code. */
static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

/*
 * Interrupt-remapping fault reason strings; their reason codes start at
 * 0x20, so index with (fault_reason - 0x20) (see dmar_get_fault_reason()).
 */
static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

/* NOTE(review): fault_reason_strings is not defined in this file; this
 * macro looks stale — confirm it has no remaining users before relying
 * on it. */
#define MAX_FAULT_REASON_IDX 	(ARRAY_SIZE(fault_reason_strings) - 1)
1057
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001058const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001059{
Dan Carpenterfefe1ed2012-05-13 20:09:38 +03001060 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1061 ARRAY_SIZE(irq_remap_fault_reasons))) {
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001062 *fault_type = INTR_REMAP;
Suresh Siddha95a02e92012-03-30 11:47:07 -07001063 return irq_remap_fault_reasons[fault_reason - 0x20];
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001064 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1065 *fault_type = DMA_REMAP;
1066 return dma_remap_fault_reasons[fault_reason];
1067 } else {
1068 *fault_type = UNKNOWN;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001069 return "Unknown";
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001070 }
Suresh Siddha0ac24912009-03-16 17:04:54 -07001071}
1072
Thomas Gleixner5c2837f2010-09-28 17:15:11 +02001073void dmar_msi_unmask(struct irq_data *data)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001074{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001075 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001076 unsigned long flag;
1077
1078 /* unmask it */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001079 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001080 writel(0, iommu->reg + DMAR_FECTL_REG);
1081 /* Read a reg to force flush the post write */
1082 readl(iommu->reg + DMAR_FECTL_REG);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001083 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001084}
1085
Thomas Gleixner5c2837f2010-09-28 17:15:11 +02001086void dmar_msi_mask(struct irq_data *data)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001087{
1088 unsigned long flag;
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001089 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001090
1091 /* mask it */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001092 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001093 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1094 /* Read a reg to force flush the post write */
1095 readl(iommu->reg + DMAR_FECTL_REG);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001096 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001097}
1098
1099void dmar_msi_write(int irq, struct msi_msg *msg)
1100{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001101 struct intel_iommu *iommu = irq_get_handler_data(irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001102 unsigned long flag;
1103
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001104 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001105 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1106 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1107 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001108 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001109}
1110
1111void dmar_msi_read(int irq, struct msi_msg *msg)
1112{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001113 struct intel_iommu *iommu = irq_get_handler_data(irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001114 unsigned long flag;
1115
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001116 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001117 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1118 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1119 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001120 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001121}
1122
1123static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1124 u8 fault_reason, u16 source_id, unsigned long long addr)
1125{
1126 const char *reason;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001127 int fault_type;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001128
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001129 reason = dmar_get_fault_reason(fault_reason, &fault_type);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001130
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001131 if (fault_type == INTR_REMAP)
1132 printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
1133 "fault index %llx\n"
1134 "INTR-REMAP:[fault reason %02d] %s\n",
1135 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1136 PCI_FUNC(source_id & 0xFF), addr >> 48,
1137 fault_reason, reason);
1138 else
1139 printk(KERN_ERR
1140 "DMAR:[%s] Request device [%02x:%02x.%d] "
1141 "fault addr %llx \n"
1142 "DMAR:[fault reason %02d] %s\n",
1143 (type ? "DMA Read" : "DMA Write"),
1144 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1145 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001146 return 0;
1147}
1148
/* Byte length of one primary fault record in the fault register area. */
#define PRIMARY_FAULT_REG_LEN (16)
/*
 * DMAR fault interrupt handler: walk the primary fault records, log and
 * clear each one, then clear the remaining status bits in DMAR_FSTS_REG.
 */
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	/* Walk records until one without the F (fault) bit is found. */
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		/* Drop the lock around the (printk-heavy) logging call. */
		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		/* Advance with wrap-around over the fault record ring. */
		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
1214
/*
 * Allocate and wire up the fault-reporting interrupt for @iommu.
 * Idempotent: returns 0 immediately if the IRQ is already set up.
 * Returns 0 on success or a negative errno.
 */
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		/* Undo the handler-data/irq assignments made above. */
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	/* IRQF_NO_THREAD: never run dmar_fault as a threaded handler. */
	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001247
1248int __init enable_drhd_fault_handling(void)
1249{
1250 struct dmar_drhd_unit *drhd;
1251
1252 /*
1253 * Enable fault control interrupt.
1254 */
1255 for_each_drhd_unit(drhd) {
1256 int ret;
1257 struct intel_iommu *iommu = drhd->iommu;
1258 ret = dmar_set_interrupt(iommu);
1259
1260 if (ret) {
1261 printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
1262 " interrupt, ret %d\n",
1263 (unsigned long long)drhd->reg_base_addr, ret);
1264 return -1;
1265 }
Suresh Siddha7f99d942010-11-30 22:22:29 -08001266
1267 /*
1268 * Clear any previous faults.
1269 */
1270 dmar_fault(iommu->irq, iommu);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001271 }
1272
1273 return 0;
1274}
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001275
1276/*
1277 * Re-enable Queued Invalidation interface.
1278 */
1279int dmar_reenable_qi(struct intel_iommu *iommu)
1280{
1281 if (!ecap_qis(iommu->ecap))
1282 return -ENOENT;
1283
1284 if (!iommu->qi)
1285 return -ENOENT;
1286
1287 /*
1288 * First disable queued invalidation.
1289 */
1290 dmar_disable_qi(iommu);
1291 /*
1292 * Then enable queued invalidation again. Since there is no pending
1293 * invalidation requests now, it's safe to re-enable queued
1294 * invalidation.
1295 */
1296 __dmar_enable_qi(iommu);
1297
1298 return 0;
1299}
Youquan Song074835f2009-09-09 12:05:39 -04001300
1301/*
1302 * Check interrupt remapping support in DMAR table description.
1303 */
Luck, Tony0b8973a2009-12-16 22:59:29 +00001304int __init dmar_ir_support(void)
Youquan Song074835f2009-09-09 12:05:39 -04001305{
1306 struct acpi_table_dmar *dmar;
1307 dmar = (struct acpi_table_dmar *)dmar_tbl;
Arnaud Patard4f506e02010-03-25 18:02:58 +00001308 if (!dmar)
1309 return 0;
Youquan Song074835f2009-09-09 12:05:39 -04001310 return dmar->flags & 0x1;
1311}
/* NOTE(review): registers detect_intel_iommu() in the IOMMU init table
 * (post stage) — confirm against the IOMMU_INIT_POST macro definition. */
IOMMU_INIT_POST(detect_intel_iommu);