/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of remapping devices
 * reported to the OS by the BIOS via the DMA Remapping Reporting (DMAR)
 * ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
 *    before the IO devices managed by that unit.
 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
 *    after the IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so that a scan of the
	 * list will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

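/*
 * Resolve one ACPI device-scope entry to a pci_dev: start from the
 * scope's bus and follow the (device, function) path entries down
 * subordinate buses.  On success a reference is held on the resulting
 * device and it is published through the RCU-protected pointer @dev.
 */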
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev __rcu **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}

	rcu_assign_pointer(*dev, pdev);

	return 0;
}

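/*
 * Count the endpoint and bridge entries in a device-scope list and
 * allocate a matching array of pci_dev pointers.  Returns NULL when the
 * list contains no such entries or when the allocation fails; callers
 * distinguish the two cases via *cnt.
 */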
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
}

int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev __rcu ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	int index, ret;

	*devices = dmar_alloc_dev_scope(start, end, cnt);
	if (*cnt == 0)
		return 0;
	else if (!*devices)
		return -ENOMEM;

	for (index = 0; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
	}

	return 0;
}

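/*
 * Release the device references taken while parsing a device-scope list
 * and free the pointer array itself.
 */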
void dmar_free_dev_scope(struct pci_dev __rcu ***devices, int *cnt)
{
	int i;
	struct pci_dev *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			pci_dev_put(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

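/*
 * Resolve the device scope of one DRHD unit.  INCLUDE_ALL units cover
 * every device on the segment and carry no explicit scope list.
 */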
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	return dmar_parse_dev_scope((void *)(drhd + 1),
				    ((void *)drhd) + drhd->header.length,
				    &dmaru->devices_cnt, &dmaru->devices,
				    drhd->segment);
}

#ifdef CONFIG_ACPI_NUMA
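/*
 * An RHSA structure ties a remapping hardware unit (identified by its
 * register base address) to an ACPI proximity domain; record the
 * resulting NUMA node on the matching IOMMU.
 */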
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping may have been done
	 * with the early fixed map and is no longer valid.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

static int dmar_pci_device_match(struct pci_dev __rcu *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;
	struct pci_dev *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev == tmp)
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	for_each_drhd_unit(drhd) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parsing DMAR table failed\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

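/*
 * Walk the DMAR table and sanity-check every DRHD entry: the register
 * base address must be non-zero and the capability registers must not
 * read back as all ones.  Returns 1 if the table looks usable, 0 if not.
 */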
static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	unmap_iommu(iommu);
 error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		destroy_irq(iommu->irq);
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

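/*
 * Check the fault status register for queue errors: an Invalidation
 * Queue Error (IQE) is recovered by overwriting the offending
 * descriptor with the wait descriptor, an Invalidation Time-out Error
 * (ITE) aborts all pending wait descriptors, and an Invalidation
 * Completion Error (ICE) is simply cleared.
 */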
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
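/*
 * A wait descriptor is queued immediately behind the caller's
 * descriptor and its status word is polled, so returning implies the
 * hardware has consumed the request.  Callers only fill in a qi_desc
 * and submit it, as qi_global_iec() below does:
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_IEC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 */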
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush of the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush of the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
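/*
 * Fault interrupt handler: walk the primary fault recording registers,
 * report and clear each pending fault, then acknowledge the overall
 * fault status.
 */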
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

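/*
 * Allocate and wire up the fault-event MSI for an IOMMU.  Done once per
 * unit; subsequent calls are no-ops.
 */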
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

1395/*
1396 * Check interrupt remapping support in DMAR table description.
1397 */
Luck, Tony0b8973a2009-12-16 22:59:29 +00001398int __init dmar_ir_support(void)
Youquan Song074835f2009-09-09 12:05:39 -04001399{
1400 struct acpi_table_dmar *dmar;
1401 dmar = (struct acpi_table_dmar *)dmar_tbl;
Arnaud Patard4f506e02010-03-25 18:02:58 +00001402 if (!dmar)
1403 return 0;
Youquan Song074835f2009-09-09 12:05:39 -04001404 return dmar->flags & 0x1;
1405}
Jiang Liu694835d2014-01-06 14:18:16 +08001406
static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);