/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so a scan of the list
	 * will only find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

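/*
 * Illustration (editor's sketch, not part of the parsing logic): if the
 * DMAR table yields units A (device-scoped), B (INCLUDE_ALL) and
 * C (device-scoped) in that order, the list becomes C -> A -> B, so
 * lookups such as dmar_find_matched_drhd_unit() only fall back to the
 * catch-all unit after every explicitly scoped unit has been tried.
 */
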
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

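/*
 * Layout sketch of the scope entry walked above (editor's note; field
 * names per ACPICA):
 *
 *	struct acpi_dmar_device_scope	entry_type, length, bus, ...
 *	struct acpi_dmar_pci_path[]	follows immediately; one
 *					(device, function) hop per bridge
 *					level down to the target device
 */
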
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
}

int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	int index, ret;

	*devices = dmar_alloc_dev_scope(start, end, cnt);
	if (*cnt == 0)
		return 0;
	else if (!*devices)
		return -ENOMEM;

	for (index = 0; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
	}

	return 0;
}

void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	int i;
	struct pci_dev *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			pci_dev_put(tmp_dev);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware
 * definition structure, which uniquely represents one DMA remapping
 * hardware unit present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	return dmar_parse_dev_scope((void *)(drhd + 1),
				    ((void *)drhd) + drhd->header.length,
				    &dmaru->devices_cnt, &dmaru->devices,
				    drhd->segment);
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been done
	 * via the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

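/*
 * Shape of the walk above (editor's sketch; a real table simply
 * concatenates variable-length structures after the fixed header):
 *
 *	struct acpi_table_dmar		width (haw - 1), flags
 *	ACPI_DMAR_TYPE_HARDWARE_UNIT	-> dmar_parse_one_drhd()
 *	ACPI_DMAR_TYPE_RESERVED_MEMORY	-> dmar_parse_one_rmrr()
 *	ACPI_DMAR_TYPE_ATSR		-> dmar_parse_one_atsr()
 *	ACPI_DMAR_HARDWARE_AFFINITY	-> dmar_parse_one_rhsa()
 *	...				each advanced by header->length
 */
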
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;
	struct pci_dev *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev == tmp)
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

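/*
 * Editor's note: pci_physfn() above means an SR-IOV virtual function is
 * matched through its physical function; VFs do not exist at boot, so
 * only the PF can appear in the ACPI device scope.
 */
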
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	for_each_drhd_unit(drhd) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Failed to parse DMAR table\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might span more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

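/*
 * Worked example (editor's sketch): with 4KB VT-d pages, a maximum
 * IOTLB register offset of 0x208 stays within the initial one-page
 * mapping, while an offset of 0x2008 aligns up to 12KB and triggers
 * the remap-with-larger-size path above.
 */
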
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		destroy_irq(iommu->irq);
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled, to prevent the interrupt
		 * context from queueing another cmd while one is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

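/*
 * Queue layout after one submission (editor's sketch): each call
 * consumes two consecutive slots out of QI_LENGTH,
 *
 *	hw[index]	the caller's invalidation descriptor
 *	hw[index + 1]	a wait descriptor whose status write flips
 *			desc_status[wait_index] to QI_DONE
 *
 * which is why the busy-wait above demands at least 3 free slots
 * before claiming a pair.
 */
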
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

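/*
 * Usage sketch (illustrative values; granularities such as
 * DMA_TLB_PSI_FLUSH come from linux/intel-iommu.h):
 *
 *	qi_flush_iotlb(iommu, did, addr, 2, DMA_TLB_PSI_FLUSH);
 *
 * queues a page-selective invalidation of 2^2 pages at addr for
 * domain did, then waits synchronously via qi_submit_sync().
 */
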
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

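/*
 * Editor's note on the encoding above: mask = N requests invalidation
 * of 2^N pages, so addr must be 2^(VTD_PAGE_SHIFT + N) aligned (hence
 * the BUG_ON) and the low address bits are filled with ones to spell
 * the size out in the VT-d device-IOTLB descriptor format.
 */
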
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation
	 * requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

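/*
 * Decode examples (editor's sketch): fault_reason 0x22 is an interrupt
 * remapping fault, 0x22 - 0x20 = 2 -> "Present field in the IRTE entry
 * is clear"; fault_reason 0x05 is a DMA remapping fault -> "PTE Write
 * access is not set".
 */
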
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

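/*
 * Primary fault record layout consumed above (editor's sketch; 16
 * bytes per record, starting at cap_fault_reg_offset(iommu->cap)):
 *
 *	bytes  0..7	faulting page address (dmar_readq)
 *	bytes  8..11	source-id of the requester
 *	bytes 12..15	F (valid) bit, request type, fault reason
 *
 * The high dword is read first since its F bit gates record validity.
 */
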
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);