/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * No locks are needed as the DMA remapping hardware unit list is
 * constructed at boot time and hotplug of these units is not supported
 * by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

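/*
 * Resolve one ACPI device-scope entry to a pci_dev.  The entry names a
 * start bus plus a path of (device, function) hops; each hop is looked
 * up with pci_get_slot() and the walk descends through pdev->subordinate
 * until it reaches the endpoint or bridge the BIOS described.
 */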
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		/* print the warning before dropping our device reference */
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

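/*
 * Two-pass parse of a run of device-scope entries: first count the
 * endpoint/bridge entries so the pci_dev array can be sized, then
 * resolve each entry through dmar_parse_one_dev_scope().
 */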
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	if (*devices && *cnt) {
		while (--*cnt >= 0)
			pci_dev_put((*devices)[*cnt]);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

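/*
 * Resolve a DRHD's device scope into pci_dev pointers.  This runs from
 * dmar_dev_scope_init(), after the PCI subsystem is up; an INCLUDE_ALL
 * unit carries no explicit scope, so there is nothing to resolve.
 */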
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_ACPI_NUMA
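/*
 * An RHSA structure ties a DRHD to a NUMA proximity domain.  Record the
 * mapped node on the matching IOMMU so its queues and tables can be
 * allocated node-locally.
 */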
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping may have used the
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy of SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

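/*
 * Find the DRHD that covers @dev: a device matches either through a
 * unit's explicit device scope (including any bridge above it) or
 * through the INCLUDE_ALL unit of its PCI segment, which sits at the
 * tail of the list as a fallback.  VFs are mapped via their PF first,
 * since device scopes only name physical functions.
 */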
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

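/*
 * Late (post-PCI) half of DMAR initialization: bind every parsed DRHD,
 * RMRR and ATSR device scope to live pci_dev pointers.  The static
 * status variable makes the function idempotent and remembers a failure
 * for subsequent callers.
 */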
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

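/*
 * Early sanity check of every DRHD in the table: a zero register base,
 * or cap/ecap registers that read back as all ones, indicate firmware
 * that reported a non-existent remapping unit.
 */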
static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *) dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

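/*
 * Allocate and minimally initialize the intel_iommu for one DRHD: map
 * its registers, read cap/ecap, compute the supported AGAWs, and mirror
 * any translation/QI/IR state already enabled (e.g. by firmware or a
 * previous kernel) from the status register into gcmd.
 */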
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

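/*
 * Inspect the fault status register for queued-invalidation errors.  An
 * IQE means the descriptor at the head is malformed: overwrite it with
 * the wait descriptor and restart from it.  An ITE aborts every pending
 * wait descriptor, so the caller retries on -EAGAIN.
 */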
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

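/*
 * Each submission burns two queue slots: the caller's descriptor plus a
 * wait descriptor whose status write-back flips desc_status[wait_index]
 * to QI_DONE; that flip is what the polling loop below waits for.
 */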
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

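/*
 * Issue a device-IOTLB (ATS) invalidation.  @mask selects a power-of-two
 * range: the address is aligned to the range and its low bits are set to
 * encode the size, per the VT-d device-TLB invalidate descriptor format.
 */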
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

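/*
 * Fault reason codes 0x20 and up belong to the interrupt-remapping
 * table, everything below to DMA remapping; anything else is reported
 * as "Unknown".
 */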
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

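/*
 * Each primary fault record is 128 bits: a qword holding the faulting
 * address, plus a second qword carrying the source-id, fault reason,
 * request type and the F bit that marks the record valid.
 */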
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

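/*
 * Wire up the fault-reporting MSI for one IOMMU: allocate a vector,
 * attach the iommu as handler data, and register dmar_fault() with
 * IRQF_NO_THREAD so faults are still reported under forced irq
 * threading.
 */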
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		u32 fault_status;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
IOMMU_INIT_POST(detect_intel_iommu);