/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * No locks are needed as the DMA remapping hardware unit list is
 * constructed at boot time, and hotplug of these units is not
 * supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

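/*
 * Walk the PCI path encoded in one ACPI device-scope entry, starting from
 * the scope's start bus, and return the pci_dev it resolves to (with a
 * reference held).  A device missing from the tree is not treated as an
 * error: *dev is set to NULL and 0 is returned so callers can skip stale
 * BIOS entries.
 */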
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list nonexistent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

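/*
 * Build the pci_dev array for one scope list (DRHD/RMRR/ATSR).  The entries
 * are scanned twice: a first pass counts endpoint and bridge scopes, a
 * second pass resolves each of them via dmar_parse_one_dev_scope().
 */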
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

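/*
 * An RHSA structure ties a DRHD to a NUMA proximity domain.  Record the
 * mapped node on the matching IOMMU so its allocations can be node-local.
 */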
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been made
	 * with the early fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * that SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

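/*
 * Resolve the device scopes of all DRHD units, then the RMRR/ATSR lists.
 * The outcome (0 or a negative errno) is cached in a static variable so
 * repeat callers get the same answer without re-parsing.
 */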
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

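/*
 * Early detection entry point.  Returns 1 when a usable DMAR table is
 * present, -ENODEV otherwise.  The table mapping is dropped at the end
 * because it was established via the early fixmap; parse_dmar_table()
 * re-detects it later.
 */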
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *) dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}


static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

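/*
 * Check the fault status register for queue errors.  On IQE the bad
 * descriptor at the queue head is overwritten with the wait descriptor;
 * on ITE every pending wait descriptor is marked aborted so the submitter
 * restarts; ICE is simply acknowledged.
 */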
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

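/*
 * Queue a context-cache invalidation for the given domain/source-id at
 * the requested granularity.
 */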
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

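/*
 * Queue an IOTLB invalidation.  The read/write drain bits are set whenever
 * the hardware advertises the corresponding drain capability, so completion
 * also waits for in-flight DMA to the invalidated range to drain.
 */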
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

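/*
 * Queue a device-IOTLB (ATS) invalidation.  A non-zero mask requests a
 * power-of-two sized range, encoded by setting the low address bits.
 */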
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX 	(ARRAY_SIZE(fault_reason_strings) - 1)

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

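/*
 * Primary fault interrupt handler: walk the fault recording registers,
 * log and clear each pending fault, then acknowledge the overall status
 * bits.  The register lock is dropped around the logging call.
 */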
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		u32 fault_status;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

1358/*
1359 * Check interrupt remapping support in DMAR table description.
1360 */
Luck, Tony0b8973a2009-12-16 22:59:29 +00001361int __init dmar_ir_support(void)
Youquan Song074835f2009-09-09 12:05:39 -04001362{
1363 struct acpi_table_dmar *dmar;
1364 dmar = (struct acpi_table_dmar *)dmar_tbl;
Arnaud Patard4f506e02010-03-25 18:02:58 +00001365 if (!dmar)
1366 return 0;
Youquan Song074835f2009-09-09 12:05:39 -04001367 return dmar->flags & 0x1;
1368}
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -04001369IOMMU_INIT_POST(detect_intel_iommu);