/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
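
/*
 * A DMAR device scope entry names a device by a start bus number plus a
 * path of (device, function) pairs that is walked down through
 * subordinate buses; the helper below resolves that path to a
 * struct pci_dev reference.
 */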
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		/* report the mismatch before dropping our reference */
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	if (*devices && *cnt) {
		while (--*cnt >= 0)
			pci_dev_put((*devices)[*cnt]);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	return dmar_parse_dev_scope((void *)(drhd + 1),
				    ((void *)drhd) + drhd->header.length,
				    &dmaru->devices_cnt, &dmaru->devices,
				    drhd->segment);
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	/* use rhsa->base_address here: drhd is no longer valid after the loop */
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again, since the earlier dmar_tbl mapping could have come
	 * from the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

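/*
 * Runs once: the static below memoizes either success (1) or the first
 * failure code, so repeat callers get the same answer without
 * re-parsing the device scopes.
 */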
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

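/*
 * Sanity-check every DRHD entry before committing to VT-d: a base
 * address of zero, an unmappable register range, or capability
 * registers that read back as all ones all indicate a broken BIOS
 * DMAR table rather than usable hardware.
 */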
static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

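/*
 * Returns 1 if a usable DMAR table was found (and, on x86, wires up
 * intel_iommu_init to run later), -ENODEV otherwise. The early table
 * mapping is dropped here either way; parse_dmar_table() re-detects
 * the table when the real parsing happens.
 */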
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}


static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers. Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		destroy_irq(iommu->irq);
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

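/*
 * Queued invalidation uses a single page of descriptors managed as a
 * ring of QI_LENGTH slots: free_head/free_tail plus the desc_status[]
 * array (QI_FREE -> QI_IN_USE -> QI_DONE/QI_ABORT) track each slot's
 * lifecycle in software.
 */
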
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

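/*
 * Each submission occupies two consecutive ring slots: the caller's
 * descriptor and a wait descriptor whose status write is what
 * qi_submit_sync() below spins on to detect completion.
 */
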
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

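/*
 * For a ranged device-IOTLB flush, the size is encoded in the address
 * itself: with a non-zero mask the low address bits are filled in and
 * the S bit (QI_DEV_IOTLB_SIZE) is set, so 2^mask pages are invalidated.
 */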
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	/* interrupt-remapping fault reasons are numbered from 0x20 */
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
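
/*
 * Each primary fault record is 16 bytes: the faulting page address at
 * offset 0, the source-id dword at offset 8, and at offset 12 a dword
 * carrying the F (fault) bit, fault reason and request type that
 * dmar_fault() decodes below.
 */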
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1; /* BIT0: INTR_REMAP */
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);