Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001
2#define pr_fmt(fmt) "DMAR-IR: " fmt
3
Yinghai Lu5aeecaf2008-08-19 20:49:59 -07004#include <linux/interrupt.h>
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -07005#include <linux/dmar.h>
Suresh Siddha2ae21012008-07-10 11:16:43 -07006#include <linux/spinlock.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09007#include <linux/slab.h>
Suresh Siddha2ae21012008-07-10 11:16:43 -07008#include <linux/jiffies.h>
Suresh Siddha20f30972009-08-04 12:07:08 -07009#include <linux/hpet.h>
Suresh Siddha2ae21012008-07-10 11:16:43 -070010#include <linux/pci.h>
Suresh Siddhab6fcb332008-07-10 11:16:44 -070011#include <linux/irq.h>
Lv Zheng8b484632013-12-03 08:49:16 +080012#include <linux/intel-iommu.h>
13#include <linux/acpi.h>
Joerg Roedelaf3b3582015-06-12 15:00:21 +020014#include <linux/crash_dump.h>
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -070015#include <asm/io_apic.h>
Yinghai Lu17483a12008-12-12 13:14:18 -080016#include <asm/smp.h>
Jaswinder Singh Rajput6d652ea2009-01-07 21:38:59 +053017#include <asm/cpu.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070018#include <asm/irq_remapping.h>
Weidong Hanf007e992009-05-23 00:41:15 +080019#include <asm/pci-direct.h>
Joerg Roedel5e2b9302012-03-30 11:47:05 -070020#include <asm/msidef.h>
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -070021
Suresh Siddha8a8f4222012-03-30 11:47:08 -070022#include "irq_remapping.h"
Joerg Roedel736baef2012-03-30 11:47:00 -070023
Joerg Roedeleef93fd2012-03-30 11:46:59 -070024struct ioapic_scope {
25 struct intel_iommu *iommu;
26 unsigned int id;
27 unsigned int bus; /* PCI bus number */
28 unsigned int devfn; /* PCI devfn number */
29};
30
31struct hpet_scope {
32 struct intel_iommu *iommu;
33 u8 id;
34 unsigned int bus;
35 unsigned int devfn;
36};
37
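/*
 * IR_X2APIC_MODE() sets the extended interrupt mode (EIM) bit in the value
 * written to the IRTA register.  IRTE_DEST() accounts for the destination-ID
 * layout: in xAPIC mode the 8-bit APIC ID lives in bits 15:8 of the IRTE
 * destination field, while in x2APIC (EIM) mode the full 32-bit ID is used
 * unshifted.
 */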
38#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
Jiang Liu13d09b62015-01-07 15:31:37 +080039#define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)
Joerg Roedeleef93fd2012-03-30 11:46:59 -070040
Jiang Liu13d09b62015-01-07 15:31:37 +080041static int __read_mostly eim_mode;
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -070042static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
Suresh Siddha20f30972009-08-04 12:07:08 -070043static struct hpet_scope ir_hpet[MAX_HPET_TBS];
Chris Wrightd1423d52010-07-20 11:06:49 -070044
Jiang Liu3a5670e2014-02-19 14:07:33 +080045/*
46 * Lock ordering:
47 * ->dmar_global_lock
48 * ->irq_2_ir_lock
49 * ->qi->q_lock
50 * ->iommu->register_lock
51 * Note:
52 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
53 * in a single-threaded environment with interrupts disabled, so no need to take
54 * the dmar_global_lock.
55 */
Thomas Gleixner96f8e982011-07-19 16:28:19 +020056static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
Thomas Gleixnerd585d062010-10-10 12:34:27 +020057
Joerg Roedelaf3b3582015-06-12 15:00:21 +020058static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
Jiang Liu694835d2014-01-06 14:18:16 +080059static int __init parse_ioapics_under_ir(void);
60
Joerg Roedelaf3b3582015-06-12 15:00:21 +020061static bool ir_pre_enabled(struct intel_iommu *iommu)
62{
63 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
64}
65
66static void clear_ir_pre_enabled(struct intel_iommu *iommu)
67{
68 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
69}
70
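/*
 * Read the global status register to see whether a previous kernel (e.g. the
 * one a kdump kernel was booted from) left interrupt remapping enabled, and
 * remember that so its IRTE table can be copied over later.
 */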
71static void init_ir_status(struct intel_iommu *iommu)
72{
73 u32 gsts;
74
75 gsts = readl(iommu->reg + DMAR_GSTS_REG);
76 if (gsts & DMA_GSTS_IRES)
77 iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
78}
79
Yinghai Lue420dfb2008-08-19 20:50:21 -070080static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
81{
Jiang Liu91411da2014-10-27 16:12:09 +080082 struct irq_cfg *cfg = irq_cfg(irq);
Thomas Gleixner349d6762010-10-10 12:29:27 +020083 return cfg ? &cfg->irq_2_iommu : NULL;
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -080084}
85
Rashika Kheria6a7885c2013-12-18 12:04:27 +053086static int get_irte(int irq, struct irte *entry)
Suresh Siddhab6fcb332008-07-10 11:16:44 -070087{
Thomas Gleixnerd585d062010-10-10 12:34:27 +020088 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
Suresh Siddha4c5502b2009-03-16 17:04:53 -070089 unsigned long flags;
Thomas Gleixnerd585d062010-10-10 12:34:27 +020090 int index;
Suresh Siddhab6fcb332008-07-10 11:16:44 -070091
Thomas Gleixnerd585d062010-10-10 12:34:27 +020092 if (!entry || !irq_iommu)
Suresh Siddhab6fcb332008-07-10 11:16:44 -070093 return -1;
94
Thomas Gleixner96f8e982011-07-19 16:28:19 +020095 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -070096
Greg Edwardsaf437462014-07-23 10:13:26 -060097 if (unlikely(!irq_iommu->iommu)) {
98 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
99 return -1;
100 }
101
Yinghai Lue420dfb2008-08-19 20:50:21 -0700102 index = irq_iommu->irte_index + irq_iommu->sub_handle;
103 *entry = *(irq_iommu->iommu->ir_table->base + index);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700104
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200105 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700106 return 0;
107}
108
Joerg Roedel263b5e82012-03-30 11:47:06 -0700109static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700110{
111 struct ir_table *table = iommu->ir_table;
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200112 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
Jiang Liu91411da2014-10-27 16:12:09 +0800113 struct irq_cfg *cfg = irq_cfg(irq);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700114 unsigned int mask = 0;
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700115 unsigned long flags;
Dan Carpenter9f4c7442014-01-09 08:32:36 +0300116 int index;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700117
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200118 if (!count || !irq_iommu)
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700119 return -1;
120
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700121 if (count > 1) {
122 count = __roundup_pow_of_two(count);
123 mask = ilog2(count);
124 }
125
126 if (mask > ecap_max_handle_mask(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200127 pr_err("Requested mask %x exceeds the max invalidation handle"
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700128 " mask value %Lx\n", mask,
129 ecap_max_handle_mask(iommu->ecap));
130 return -1;
131 }
132
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200133 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
Jiang Liu360eb3c52014-01-06 14:18:08 +0800134 index = bitmap_find_free_region(table->bitmap,
135 INTR_REMAP_TABLE_ENTRIES, mask);
136 if (index < 0) {
137 pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
138 } else {
139 cfg->remapped = 1;
140 irq_iommu->iommu = iommu;
141 irq_iommu->irte_index = index;
142 irq_iommu->sub_handle = 0;
143 irq_iommu->irte_mask = mask;
144 }
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200145 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700146
147 return index;
148}
149
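/*
 * Invalidate the interrupt entry cache entries selected by @index and @mask
 * (2^mask contiguous IRTEs) via a queued-invalidation descriptor and wait
 * for the invalidation to complete.
 */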
Yu Zhao704126a2009-01-04 16:28:52 +0800150static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700151{
152 struct qi_desc desc;
153
154 desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
155 | QI_IEC_SELECTIVE;
156 desc.high = 0;
157
Yu Zhao704126a2009-01-04 16:28:52 +0800158 return qi_submit_sync(&desc, iommu);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700159}
160
Joerg Roedel263b5e82012-03-30 11:47:06 -0700161static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700162{
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200163 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700164 unsigned long flags;
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200165 int index;
166
167 if (!irq_iommu)
168 return -1;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700169
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200170 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
Yinghai Lue420dfb2008-08-19 20:50:21 -0700171 *sub_handle = irq_iommu->sub_handle;
172 index = irq_iommu->irte_index;
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200173 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700174 return index;
175}
176
Joerg Roedel263b5e82012-03-30 11:47:06 -0700177static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700178{
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200179 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
Jiang Liu91411da2014-10-27 16:12:09 +0800180 struct irq_cfg *cfg = irq_cfg(irq);
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700181 unsigned long flags;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700182
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200183 if (!irq_iommu)
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -0800184 return -1;
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200185
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200186 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -0800187
Joerg Roedel9b1b0e42012-09-26 12:44:45 +0200188 cfg->remapped = 1;
Yinghai Lue420dfb2008-08-19 20:50:21 -0700189 irq_iommu->iommu = iommu;
190 irq_iommu->irte_index = index;
191 irq_iommu->sub_handle = subhandle;
192 irq_iommu->irte_mask = 0;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700193
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200194 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700195
196 return 0;
197}
198
Joerg Roedel263b5e82012-03-30 11:47:06 -0700199static int modify_irte(int irq, struct irte *irte_modified)
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700200{
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200201 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700202 struct intel_iommu *iommu;
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700203 unsigned long flags;
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200204 struct irte *irte;
205 int rc, index;
206
207 if (!irq_iommu)
208 return -1;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700209
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200210 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700211
Yinghai Lue420dfb2008-08-19 20:50:21 -0700212 iommu = irq_iommu->iommu;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700213
Yinghai Lue420dfb2008-08-19 20:50:21 -0700214 index = irq_iommu->irte_index + irq_iommu->sub_handle;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700215 irte = &iommu->ir_table->base[index];
216
Linus Torvaldsc513b672010-08-06 11:02:31 -0700217 set_64bit(&irte->low, irte_modified->low);
218 set_64bit(&irte->high, irte_modified->high);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700219 __iommu_flush_cache(iommu, irte, sizeof(*irte));
220
Yu Zhao704126a2009-01-04 16:28:52 +0800221 rc = qi_flush_iec(iommu, index, 0);
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200222 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Yu Zhao704126a2009-01-04 16:28:52 +0800223
224 return rc;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700225}
226
Joerg Roedel263b5e82012-03-30 11:47:06 -0700227static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
Suresh Siddha20f30972009-08-04 12:07:08 -0700228{
229 int i;
230
231 for (i = 0; i < MAX_HPET_TBS; i++)
Jiang Liua7a3dad2014-11-09 22:48:00 +0800232 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
Suresh Siddha20f30972009-08-04 12:07:08 -0700233 return ir_hpet[i].iommu;
234 return NULL;
235}
236
Joerg Roedel263b5e82012-03-30 11:47:06 -0700237static struct intel_iommu *map_ioapic_to_ir(int apic)
Suresh Siddha89027d32008-07-10 11:16:56 -0700238{
239 int i;
240
241 for (i = 0; i < MAX_IO_APICS; i++)
Jiang Liua7a3dad2014-11-09 22:48:00 +0800242 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
Suresh Siddha89027d32008-07-10 11:16:56 -0700243 return ir_ioapic[i].iommu;
244 return NULL;
245}
246
Joerg Roedel263b5e82012-03-30 11:47:06 -0700247static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
Suresh Siddha75c46fa2008-07-10 11:16:57 -0700248{
249 struct dmar_drhd_unit *drhd;
250
251 drhd = dmar_find_matched_drhd_unit(dev);
252 if (!drhd)
253 return NULL;
254
255 return drhd->iommu;
256}
257
Weidong Hanc4658b42009-05-23 00:41:14 +0800258static int clear_entries(struct irq_2_iommu *irq_iommu)
259{
260 struct irte *start, *entry, *end;
261 struct intel_iommu *iommu;
262 int index;
263
264 if (irq_iommu->sub_handle)
265 return 0;
266
267 iommu = irq_iommu->iommu;
268 index = irq_iommu->irte_index + irq_iommu->sub_handle;
269
270 start = iommu->ir_table->base + index;
271 end = start + (1 << irq_iommu->irte_mask);
272
273 for (entry = start; entry < end; entry++) {
Linus Torvaldsc513b672010-08-06 11:02:31 -0700274 set_64bit(&entry->low, 0);
275 set_64bit(&entry->high, 0);
Weidong Hanc4658b42009-05-23 00:41:14 +0800276 }
Jiang Liu360eb3c52014-01-06 14:18:08 +0800277 bitmap_release_region(iommu->ir_table->bitmap, index,
278 irq_iommu->irte_mask);
Weidong Hanc4658b42009-05-23 00:41:14 +0800279
280 return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
281}
282
Joerg Roedel9d619f62012-03-30 11:47:04 -0700283static int free_irte(int irq)
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700284{
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200285 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700286 unsigned long flags;
Thomas Gleixnerd585d062010-10-10 12:34:27 +0200287 int rc;
288
289 if (!irq_iommu)
290 return -1;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700291
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200292 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700293
Weidong Hanc4658b42009-05-23 00:41:14 +0800294 rc = clear_entries(irq_iommu);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700295
Yinghai Lue420dfb2008-08-19 20:50:21 -0700296 irq_iommu->iommu = NULL;
297 irq_iommu->irte_index = 0;
298 irq_iommu->sub_handle = 0;
299 irq_iommu->irte_mask = 0;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700300
Thomas Gleixner96f8e982011-07-19 16:28:19 +0200301 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700302
Yu Zhao704126a2009-01-04 16:28:52 +0800303 return rc;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700304}
305
Weidong Hanf007e992009-05-23 00:41:15 +0800306/*
307 * source validation type
308 */
309#define SVT_NO_VERIFY 0x0 /* no verification is required */
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300310#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */
Weidong Hanf007e992009-05-23 00:41:15 +0800311#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
312
313/*
314 * source-id qualifier
315 */
316#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
317#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
318 * the third least significant bit
319 */
320#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
321 * the second and third least significant bits
322 */
323#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
324 * the three least significant bits
325 */
326
327/*
328 * set SVT, SQ and SID fields of irte to verify
329 * source ids of interrupt requests
330 */
331static void set_irte_sid(struct irte *irte, unsigned int svt,
332 unsigned int sq, unsigned int sid)
333{
Chris Wrightd1423d52010-07-20 11:06:49 -0700334 if (disable_sourceid_checking)
335 svt = SVT_NO_VERIFY;
Weidong Hanf007e992009-05-23 00:41:15 +0800336 irte->svt = svt;
337 irte->sq = sq;
338 irte->sid = sid;
339}
340
Joerg Roedel263b5e82012-03-30 11:47:06 -0700341static int set_ioapic_sid(struct irte *irte, int apic)
Weidong Hanf007e992009-05-23 00:41:15 +0800342{
343 int i;
344 u16 sid = 0;
345
346 if (!irte)
347 return -1;
348
Jiang Liu3a5670e2014-02-19 14:07:33 +0800349 down_read(&dmar_global_lock);
Weidong Hanf007e992009-05-23 00:41:15 +0800350 for (i = 0; i < MAX_IO_APICS; i++) {
Jiang Liua7a3dad2014-11-09 22:48:00 +0800351 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
Weidong Hanf007e992009-05-23 00:41:15 +0800352 sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
353 break;
354 }
355 }
Jiang Liu3a5670e2014-02-19 14:07:33 +0800356 up_read(&dmar_global_lock);
Weidong Hanf007e992009-05-23 00:41:15 +0800357
358 if (sid == 0) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200359 pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
Weidong Hanf007e992009-05-23 00:41:15 +0800360 return -1;
361 }
362
Jiang Liu2fe2c602014-01-06 14:18:17 +0800363 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
Weidong Hanf007e992009-05-23 00:41:15 +0800364
365 return 0;
366}
367
Joerg Roedel263b5e82012-03-30 11:47:06 -0700368static int set_hpet_sid(struct irte *irte, u8 id)
Suresh Siddha20f30972009-08-04 12:07:08 -0700369{
370 int i;
371 u16 sid = 0;
372
373 if (!irte)
374 return -1;
375
Jiang Liu3a5670e2014-02-19 14:07:33 +0800376 down_read(&dmar_global_lock);
Suresh Siddha20f30972009-08-04 12:07:08 -0700377 for (i = 0; i < MAX_HPET_TBS; i++) {
Jiang Liua7a3dad2014-11-09 22:48:00 +0800378 if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
Suresh Siddha20f30972009-08-04 12:07:08 -0700379 sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
380 break;
381 }
382 }
Jiang Liu3a5670e2014-02-19 14:07:33 +0800383 up_read(&dmar_global_lock);
Suresh Siddha20f30972009-08-04 12:07:08 -0700384
385 if (sid == 0) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200386 pr_warn("Failed to set source-id of HPET block (%d)\n", id);
Suresh Siddha20f30972009-08-04 12:07:08 -0700387 return -1;
388 }
389
390 /*
391 * Should really use SQ_ALL_16. Some platforms are broken.
392 * While we figure out the right quirks for these broken platforms, use
393 * SQ_13_IGNORE_3 for now.
394 */
395 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
396
397 return 0;
398}
399
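/* pci_for_each_dma_alias() callback: records the most recently visited device/alias pair. */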
Alex Williamson579305f2014-07-03 09:51:43 -0600400struct set_msi_sid_data {
401 struct pci_dev *pdev;
402 u16 alias;
403};
404
405static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
406{
407 struct set_msi_sid_data *data = opaque;
408
409 data->pdev = pdev;
410 data->alias = alias;
411
412 return 0;
413}
414
Joerg Roedel263b5e82012-03-30 11:47:06 -0700415static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
Weidong Hanf007e992009-05-23 00:41:15 +0800416{
Alex Williamson579305f2014-07-03 09:51:43 -0600417 struct set_msi_sid_data data;
Weidong Hanf007e992009-05-23 00:41:15 +0800418
419 if (!irte || !dev)
420 return -1;
421
Alex Williamson579305f2014-07-03 09:51:43 -0600422 pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
Weidong Hanf007e992009-05-23 00:41:15 +0800423
Alex Williamson579305f2014-07-03 09:51:43 -0600424 /*
425 * DMA alias provides us with a PCI device and alias. The only case
426 * where it will return an alias on a different bus than the
427 * device is the case of a PCIe-to-PCI bridge, where the alias is for
428 * the subordinate bus. In this case we can only verify the bus.
429 *
430 * If the alias device is on a different bus than our source device
431 * then we have a topology based alias, use it.
432 *
433 * Otherwise, the alias is for a device DMA quirk and we cannot
434 * assume that MSI uses the same requester ID. Therefore use the
435 * original device.
436 */
437 if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
438 set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
439 PCI_DEVID(PCI_BUS_NUM(data.alias),
440 dev->bus->number));
441 else if (data.pdev->bus->number != dev->bus->number)
442 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
443 else
444 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
445 PCI_DEVID(dev->bus->number, dev->devfn));
Weidong Hanf007e992009-05-23 00:41:15 +0800446
447 return 0;
448}
449
Joerg Roedelaf3b3582015-06-12 15:00:21 +0200450static int iommu_load_old_irte(struct intel_iommu *iommu)
451{
452 struct irte *old_ir_table;
453 phys_addr_t irt_phys;
454 size_t size;
455 u64 irta;
456
457 if (!is_kdump_kernel()) {
458 pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
459 iommu->name);
460 clear_ir_pre_enabled(iommu);
461 iommu_disable_irq_remapping(iommu);
462 return -EINVAL;
463 }
464
465 /* Check whether the old ir-table has the same size as ours */
466 irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
467 if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
468 != INTR_REMAP_TABLE_REG_SIZE)
469 return -EINVAL;
470
471 irt_phys = irta & VTD_PAGE_MASK;
472 size = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);
473
474 /* Map the old IR table */
475 old_ir_table = ioremap_cache(irt_phys, size);
476 if (!old_ir_table)
477 return -ENOMEM;
478
479 /* Copy data over */
480 memcpy(iommu->ir_table->base, old_ir_table, size);
481
482 __iommu_flush_cache(iommu, iommu->ir_table->base, size);
483
484 return 0;
485}
486
487
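/*
 * Program the remapping-table address (and EIM mode) into the IRTA register,
 * tell the hardware to latch it via the SIRTP command, and then globally
 * invalidate the interrupt entry cache so no stale entries are used.
 */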
Suresh Siddha95a02e92012-03-30 11:47:07 -0700488static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
Suresh Siddha2ae21012008-07-10 11:16:43 -0700489{
Joerg Roedeld4d1c0f2015-06-12 14:35:54 +0200490 unsigned long flags;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700491 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +0100492 u32 sts;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700493
494 addr = virt_to_phys((void *)iommu->ir_table->base);
495
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200496 raw_spin_lock_irqsave(&iommu->register_lock, flags);
Suresh Siddha2ae21012008-07-10 11:16:43 -0700497
498 dmar_writeq(iommu->reg + DMAR_IRTA_REG,
499 (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
500
501 /* Set interrupt-remapping table pointer */
Jan Kiszkaf63ef692014-08-11 13:13:25 +0200502 writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
Suresh Siddha2ae21012008-07-10 11:16:43 -0700503
504 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
505 readl, (sts & DMA_GSTS_IRTPS), sts);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200506 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Suresh Siddha2ae21012008-07-10 11:16:43 -0700507
508 /*
Joerg Roedeld4d1c0f2015-06-12 14:35:54 +0200509 * Global invalidation of interrupt entry cache to make sure the
510 * hardware uses the new irq remapping table.
Suresh Siddha2ae21012008-07-10 11:16:43 -0700511 */
512 qi_global_iec(iommu);
Joerg Roedeld4d1c0f2015-06-12 14:35:54 +0200513}
514
515static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
516{
517 unsigned long flags;
518 u32 sts;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700519
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200520 raw_spin_lock_irqsave(&iommu->register_lock, flags);
Suresh Siddha2ae21012008-07-10 11:16:43 -0700521
522 /* Enable interrupt-remapping */
Suresh Siddha2ae21012008-07-10 11:16:43 -0700523 iommu->gcmd |= DMA_GCMD_IRE;
Andy Lutomirskiaf8d1022013-02-01 14:57:43 -0800524 iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
David Woodhousec416daa2009-05-10 20:30:58 +0100525 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Suresh Siddha2ae21012008-07-10 11:16:43 -0700526
527 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
528 readl, (sts & DMA_GSTS_IRES), sts);
529
Andy Lutomirskiaf8d1022013-02-01 14:57:43 -0800530 /*
531 * With CFI clear in the Global Command register, we should be
532 * protected from dangerous (i.e. compatibility) interrupts
533 * regardless of x2apic status. Check just to be sure.
534 */
535 if (sts & DMA_GSTS_CFIS)
536 WARN(1, KERN_WARNING
537 "Compatibility-format IRQs enabled despite intr remapping;\n"
538 "you are vulnerable to IRQ injection.\n");
539
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200540 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Suresh Siddha2ae21012008-07-10 11:16:43 -0700541}
542
Jiang Liua7a3dad2014-11-09 22:48:00 +0800543static int intel_setup_irq_remapping(struct intel_iommu *iommu)
Suresh Siddha2ae21012008-07-10 11:16:43 -0700544{
545 struct ir_table *ir_table;
546 struct page *pages;
Jiang Liu360eb3c52014-01-06 14:18:08 +0800547 unsigned long *bitmap;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700548
Jiang Liua7a3dad2014-11-09 22:48:00 +0800549 if (iommu->ir_table)
550 return 0;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700551
Thomas Gleixnere3a981d2015-01-07 15:31:30 +0800552 ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
Jiang Liua7a3dad2014-11-09 22:48:00 +0800553 if (!ir_table)
Suresh Siddha2ae21012008-07-10 11:16:43 -0700554 return -ENOMEM;
555
Thomas Gleixnere3a981d2015-01-07 15:31:30 +0800556 pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
Suresh Siddha824cd752009-10-02 11:01:23 -0700557 INTR_REMAP_PAGE_ORDER);
Suresh Siddha2ae21012008-07-10 11:16:43 -0700558
559 if (!pages) {
Jiang Liu360eb3c52014-01-06 14:18:08 +0800560 pr_err("IR%d: failed to allocate pages of order %d\n",
561 iommu->seq_id, INTR_REMAP_PAGE_ORDER);
Jiang Liua7a3dad2014-11-09 22:48:00 +0800562 goto out_free_table;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700563 }
564
Jiang Liu360eb3c52014-01-06 14:18:08 +0800565 bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
566 sizeof(long), GFP_ATOMIC);
567 if (bitmap == NULL) {
568 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
Jiang Liua7a3dad2014-11-09 22:48:00 +0800569 goto out_free_pages;
Jiang Liu360eb3c52014-01-06 14:18:08 +0800570 }
571
Suresh Siddha2ae21012008-07-10 11:16:43 -0700572 ir_table->base = page_address(pages);
Jiang Liu360eb3c52014-01-06 14:18:08 +0800573 ir_table->bitmap = bitmap;
Jiang Liua7a3dad2014-11-09 22:48:00 +0800574 iommu->ir_table = ir_table;
Joerg Roedel9e4e49d2015-06-12 14:23:56 +0200575
576 /*
577 * If the queued invalidation is already initialized,
578 * shouldn't disable it.
579 */
580 if (!iommu->qi) {
581 /*
582 * Clear previous faults.
583 */
584 dmar_fault(-1, iommu);
585 dmar_disable_qi(iommu);
586
587 if (dmar_enable_qi(iommu)) {
588 pr_err("Failed to enable queued invalidation\n");
589 goto out_free_bitmap;
590 }
591 }
592
Joerg Roedelaf3b3582015-06-12 15:00:21 +0200593 init_ir_status(iommu);
594
595 if (ir_pre_enabled(iommu)) {
596 if (iommu_load_old_irte(iommu))
597 pr_err("Failed to copy IR table for %s from previous kernel\n",
598 iommu->name);
599 else
600 pr_info("Copied IR table for %s from previous kernel\n",
601 iommu->name);
602 }
603
Joerg Roedeld4d1c0f2015-06-12 14:35:54 +0200604 iommu_set_irq_remapping(iommu, eim_mode);
605
Suresh Siddha2ae21012008-07-10 11:16:43 -0700606 return 0;
Jiang Liua7a3dad2014-11-09 22:48:00 +0800607
Joerg Roedel9e4e49d2015-06-12 14:23:56 +0200608out_free_bitmap:
609 kfree(bitmap);
Jiang Liua7a3dad2014-11-09 22:48:00 +0800610out_free_pages:
611 __free_pages(pages, INTR_REMAP_PAGE_ORDER);
612out_free_table:
613 kfree(ir_table);
Joerg Roedel9e4e49d2015-06-12 14:23:56 +0200614
615 iommu->ir_table = NULL;
616
Jiang Liua7a3dad2014-11-09 22:48:00 +0800617 return -ENOMEM;
618}
619
620static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
621{
622 if (iommu && iommu->ir_table) {
623 free_pages((unsigned long)iommu->ir_table->base,
624 INTR_REMAP_PAGE_ORDER);
625 kfree(iommu->ir_table->bitmap);
626 kfree(iommu->ir_table);
627 iommu->ir_table = NULL;
628 }
Suresh Siddha2ae21012008-07-10 11:16:43 -0700629}
630
Suresh Siddhaeba67e52009-03-16 17:04:56 -0700631/*
632 * Disable Interrupt Remapping.
633 */
Suresh Siddha95a02e92012-03-30 11:47:07 -0700634static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
Suresh Siddhaeba67e52009-03-16 17:04:56 -0700635{
636 unsigned long flags;
637 u32 sts;
638
639 if (!ecap_ir_support(iommu->ecap))
640 return;
641
Fenghua Yub24696b2009-03-27 14:22:44 -0700642 /*
643 * global invalidation of interrupt entry cache before disabling
644 * interrupt-remapping.
645 */
646 qi_global_iec(iommu);
647
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200648 raw_spin_lock_irqsave(&iommu->register_lock, flags);
Suresh Siddhaeba67e52009-03-16 17:04:56 -0700649
650 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
651 if (!(sts & DMA_GSTS_IRES))
652 goto end;
653
654 iommu->gcmd &= ~DMA_GCMD_IRE;
655 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
656
657 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
658 readl, !(sts & DMA_GSTS_IRES), sts);
659
660end:
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200661 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Suresh Siddhaeba67e52009-03-16 17:04:56 -0700662}
663
Suresh Siddha41750d32011-08-23 17:05:18 -0700664static int __init dmar_x2apic_optout(void)
665{
666 struct acpi_table_dmar *dmar;
667 dmar = (struct acpi_table_dmar *)dmar_tbl;
668 if (!dmar || no_x2apic_optout)
669 return 0;
670 return dmar->flags & DMAR_X2APIC_OPT_OUT;
671}
672
Thomas Gleixner11190302015-01-07 15:31:29 +0800673static void __init intel_cleanup_irq_remapping(void)
674{
675 struct dmar_drhd_unit *drhd;
676 struct intel_iommu *iommu;
677
678 for_each_iommu(iommu, drhd) {
679 if (ecap_ir_support(iommu->ecap)) {
680 iommu_disable_irq_remapping(iommu);
681 intel_teardown_irq_remapping(iommu);
682 }
683 }
684
685 if (x2apic_supported())
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200686 pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
Thomas Gleixner11190302015-01-07 15:31:29 +0800687}
688
689static int __init intel_prepare_irq_remapping(void)
690{
691 struct dmar_drhd_unit *drhd;
692 struct intel_iommu *iommu;
Joerg Roedel23256d02015-06-12 14:15:49 +0200693 int eim = 0;
Thomas Gleixner11190302015-01-07 15:31:29 +0800694
Jiang Liu2966d952015-01-07 15:31:35 +0800695 if (irq_remap_broken) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200696 pr_warn("This system BIOS has enabled interrupt remapping\n"
Jiang Liu2966d952015-01-07 15:31:35 +0800697 "on a chipset that contains an erratum making that\n"
698 "feature unstable. To maintain system stability\n"
699 "interrupt remapping is being disabled. Please\n"
700 "contact your BIOS vendor for an update\n");
701 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
Jiang Liu2966d952015-01-07 15:31:35 +0800702 return -ENODEV;
703 }
704
Thomas Gleixner11190302015-01-07 15:31:29 +0800705 if (dmar_table_init() < 0)
Jiang Liu2966d952015-01-07 15:31:35 +0800706 return -ENODEV;
707
708 if (!dmar_ir_support())
709 return -ENODEV;
Thomas Gleixner11190302015-01-07 15:31:29 +0800710
711 if (parse_ioapics_under_ir() != 1) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200712 pr_info("Not enabling interrupt remapping\n");
Thomas Gleixner11190302015-01-07 15:31:29 +0800713 goto error;
714 }
715
Joerg Roedel69cf1d82015-01-07 15:31:36 +0800716 /* First make sure all IOMMUs support IRQ remapping */
Jiang Liu2966d952015-01-07 15:31:35 +0800717 for_each_iommu(iommu, drhd)
Joerg Roedel69cf1d82015-01-07 15:31:36 +0800718 if (!ecap_ir_support(iommu->ecap))
Thomas Gleixner11190302015-01-07 15:31:29 +0800719 goto error;
Joerg Roedel69cf1d82015-01-07 15:31:36 +0800720
Joerg Roedel23256d02015-06-12 14:15:49 +0200721 /* Detect remapping mode: lapic or x2apic */
722 if (x2apic_supported()) {
723 eim = !dmar_x2apic_optout();
724 if (!eim) {
725 pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.");
726 pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
727 }
728 }
729
730 for_each_iommu(iommu, drhd) {
731 if (eim && !ecap_eim_support(iommu->ecap)) {
732 pr_info("%s does not support EIM\n", iommu->name);
733 eim = 0;
734 }
Joerg Roedelc676f582015-06-12 14:25:53 +0200735
736 /* Disable IRQ remapping if it is already enabled */
737 iommu_disable_irq_remapping(iommu);
Joerg Roedel23256d02015-06-12 14:15:49 +0200738 }
739
740 eim_mode = eim;
741 if (eim)
742 pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
743
Joerg Roedel9e4e49d2015-06-12 14:23:56 +0200744 /* Do the initializations early */
745 for_each_iommu(iommu, drhd) {
746 if (intel_setup_irq_remapping(iommu)) {
747 pr_err("Failed to setup irq remapping for %s\n",
748 iommu->name);
Joerg Roedel69cf1d82015-01-07 15:31:36 +0800749 goto error;
Joerg Roedel9e4e49d2015-06-12 14:23:56 +0200750 }
751 }
Joerg Roedel69cf1d82015-01-07 15:31:36 +0800752
Thomas Gleixner11190302015-01-07 15:31:29 +0800753 return 0;
Jiang Liu2966d952015-01-07 15:31:35 +0800754
Thomas Gleixner11190302015-01-07 15:31:29 +0800755error:
756 intel_cleanup_irq_remapping();
Jiang Liu2966d952015-01-07 15:31:35 +0800757 return -ENODEV;
Thomas Gleixner11190302015-01-07 15:31:29 +0800758}
759
Suresh Siddha95a02e92012-03-30 11:47:07 -0700760static int __init intel_enable_irq_remapping(void)
Suresh Siddha2ae21012008-07-10 11:16:43 -0700761{
762 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +0800763 struct intel_iommu *iommu;
Quentin Lambert2f119c72015-02-06 10:59:53 +0100764 bool setup = false;
Suresh Siddha41750d32011-08-23 17:05:18 -0700765
Suresh Siddha2ae21012008-07-10 11:16:43 -0700766 /*
767 * Setup Interrupt-remapping for all the DRHD's now.
768 */
Jiang Liu7c919772014-01-06 14:18:18 +0800769 for_each_iommu(iommu, drhd) {
Joerg Roedeld4d1c0f2015-06-12 14:35:54 +0200770 iommu_enable_irq_remapping(iommu);
Quentin Lambert2f119c72015-02-06 10:59:53 +0100771 setup = true;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700772 }
773
774 if (!setup)
775 goto error;
776
Suresh Siddha95a02e92012-03-30 11:47:07 -0700777 irq_remapping_enabled = 1;
Joerg Roedelafcc8a42012-09-26 12:44:36 +0200778
779 /*
780 * VT-d has a different layout for IO-APIC entries when
781 * interrupt remapping is enabled. So it needs a special routine
782 * to print IO-APIC entries for debugging purposes too.
783 */
784 x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;
785
Joerg Roedel23256d02015-06-12 14:15:49 +0200786 pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");
Suresh Siddha2ae21012008-07-10 11:16:43 -0700787
Joerg Roedel23256d02015-06-12 14:15:49 +0200788 return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700789
790error:
Thomas Gleixner11190302015-01-07 15:31:29 +0800791 intel_cleanup_irq_remapping();
Suresh Siddha2ae21012008-07-10 11:16:43 -0700792 return -1;
793}
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700794
Jiang Liua7a3dad2014-11-09 22:48:00 +0800795static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
796 struct intel_iommu *iommu,
797 struct acpi_dmar_hardware_unit *drhd)
Suresh Siddha20f30972009-08-04 12:07:08 -0700798{
799 struct acpi_dmar_pci_path *path;
800 u8 bus;
Jiang Liua7a3dad2014-11-09 22:48:00 +0800801 int count, free = -1;
Suresh Siddha20f30972009-08-04 12:07:08 -0700802
803 bus = scope->bus;
804 path = (struct acpi_dmar_pci_path *)(scope + 1);
805 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
806 / sizeof(struct acpi_dmar_pci_path);
807
808 while (--count > 0) {
809 /*
810 * Access PCI config space directly because the PCI
811 * subsystem isn't initialized yet.
812 */
Lv Zhengfa5f5082013-10-31 09:30:22 +0800813 bus = read_pci_config_byte(bus, path->device, path->function,
Suresh Siddha20f30972009-08-04 12:07:08 -0700814 PCI_SECONDARY_BUS);
815 path++;
816 }
Jiang Liua7a3dad2014-11-09 22:48:00 +0800817
818 for (count = 0; count < MAX_HPET_TBS; count++) {
819 if (ir_hpet[count].iommu == iommu &&
820 ir_hpet[count].id == scope->enumeration_id)
821 return 0;
822 else if (ir_hpet[count].iommu == NULL && free == -1)
823 free = count;
824 }
825 if (free == -1) {
826 pr_warn("Exceeded Max HPET blocks\n");
827 return -ENOSPC;
828 }
829
830 ir_hpet[free].iommu = iommu;
831 ir_hpet[free].id = scope->enumeration_id;
832 ir_hpet[free].bus = bus;
833 ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
834 pr_info("HPET id %d under DRHD base 0x%Lx\n",
835 scope->enumeration_id, drhd->address);
836
837 return 0;
Suresh Siddha20f30972009-08-04 12:07:08 -0700838}
839
Jiang Liua7a3dad2014-11-09 22:48:00 +0800840static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
841 struct intel_iommu *iommu,
842 struct acpi_dmar_hardware_unit *drhd)
Weidong Hanf007e992009-05-23 00:41:15 +0800843{
844 struct acpi_dmar_pci_path *path;
845 u8 bus;
Jiang Liua7a3dad2014-11-09 22:48:00 +0800846 int count, free = -1;
Weidong Hanf007e992009-05-23 00:41:15 +0800847
848 bus = scope->bus;
849 path = (struct acpi_dmar_pci_path *)(scope + 1);
850 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
851 / sizeof(struct acpi_dmar_pci_path);
852
853 while (--count > 0) {
854 /*
855 * Access PCI config space directly because the PCI
856 * subsystem isn't initialized yet.
857 */
Lv Zhengfa5f5082013-10-31 09:30:22 +0800858 bus = read_pci_config_byte(bus, path->device, path->function,
Weidong Hanf007e992009-05-23 00:41:15 +0800859 PCI_SECONDARY_BUS);
860 path++;
861 }
862
Jiang Liua7a3dad2014-11-09 22:48:00 +0800863 for (count = 0; count < MAX_IO_APICS; count++) {
864 if (ir_ioapic[count].iommu == iommu &&
865 ir_ioapic[count].id == scope->enumeration_id)
866 return 0;
867 else if (ir_ioapic[count].iommu == NULL && free == -1)
868 free = count;
869 }
870 if (free == -1) {
871 pr_warn("Exceeded Max IO APICS\n");
872 return -ENOSPC;
873 }
874
875 ir_ioapic[free].bus = bus;
876 ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
877 ir_ioapic[free].iommu = iommu;
878 ir_ioapic[free].id = scope->enumeration_id;
879 pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
880 scope->enumeration_id, drhd->address, iommu->seq_id);
881
882 return 0;
Weidong Hanf007e992009-05-23 00:41:15 +0800883}
884
Suresh Siddha20f30972009-08-04 12:07:08 -0700885static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
886 struct intel_iommu *iommu)
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700887{
Jiang Liua7a3dad2014-11-09 22:48:00 +0800888 int ret = 0;
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700889 struct acpi_dmar_hardware_unit *drhd;
890 struct acpi_dmar_device_scope *scope;
891 void *start, *end;
892
893 drhd = (struct acpi_dmar_hardware_unit *)header;
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700894 start = (void *)(drhd + 1);
895 end = ((void *)drhd) + header->length;
896
Jiang Liua7a3dad2014-11-09 22:48:00 +0800897 while (start < end && ret == 0) {
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700898 scope = start;
Jiang Liua7a3dad2014-11-09 22:48:00 +0800899 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
900 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
901 else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
902 ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700903 start += scope->length;
904 }
905
Jiang Liua7a3dad2014-11-09 22:48:00 +0800906 return ret;
907}
908
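/* Drop all IOAPIC and HPET scope entries that reference @iommu (used on IOMMU hot-removal). */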
909static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
910{
911 int i;
912
913 for (i = 0; i < MAX_HPET_TBS; i++)
914 if (ir_hpet[i].iommu == iommu)
915 ir_hpet[i].iommu = NULL;
916
917 for (i = 0; i < MAX_IO_APICS; i++)
918 if (ir_ioapic[i].iommu == iommu)
919 ir_ioapic[i].iommu = NULL;
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700920}
921
922/*
923 * Finds the association between each IOAPIC and its interrupt-remapping
924 * hardware unit.
925 */
Jiang Liu694835d2014-01-06 14:18:16 +0800926static int __init parse_ioapics_under_ir(void)
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700927{
928 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +0800929 struct intel_iommu *iommu;
Quentin Lambert2f119c72015-02-06 10:59:53 +0100930 bool ir_supported = false;
Seth Forshee32ab31e2012-08-08 08:27:03 -0500931 int ioapic_idx;
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700932
Jiang Liu7c919772014-01-06 14:18:18 +0800933 for_each_iommu(iommu, drhd)
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700934 if (ecap_ir_support(iommu->ecap)) {
Suresh Siddha20f30972009-08-04 12:07:08 -0700935 if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700936 return -1;
937
Quentin Lambert2f119c72015-02-06 10:59:53 +0100938 ir_supported = true;
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700939 }
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700940
Seth Forshee32ab31e2012-08-08 08:27:03 -0500941 if (!ir_supported)
942 return 0;
943
944 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
945 int ioapic_id = mpc_ioapic_id(ioapic_idx);
946 if (!map_ioapic_to_ir(ioapic_id)) {
947 pr_err(FW_BUG "ioapic %d has no mapping iommu, "
948 "interrupt remapping will be disabled\n",
949 ioapic_id);
950 return -1;
951 }
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700952 }
953
Seth Forshee32ab31e2012-08-08 08:27:03 -0500954 return 1;
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700955}
Fenghua Yub24696b2009-03-27 14:22:44 -0700956
Rashika Kheria6a7885c2013-12-18 12:04:27 +0530957static int __init ir_dev_scope_init(void)
Suresh Siddhac2c72862011-08-23 17:05:19 -0700958{
Jiang Liu3a5670e2014-02-19 14:07:33 +0800959 int ret;
960
Suresh Siddha95a02e92012-03-30 11:47:07 -0700961 if (!irq_remapping_enabled)
Suresh Siddhac2c72862011-08-23 17:05:19 -0700962 return 0;
963
Jiang Liu3a5670e2014-02-19 14:07:33 +0800964 down_write(&dmar_global_lock);
965 ret = dmar_dev_scope_init();
966 up_write(&dmar_global_lock);
967
968 return ret;
Suresh Siddhac2c72862011-08-23 17:05:19 -0700969}
970rootfs_initcall(ir_dev_scope_init);
971
Suresh Siddha95a02e92012-03-30 11:47:07 -0700972static void disable_irq_remapping(void)
Fenghua Yub24696b2009-03-27 14:22:44 -0700973{
974 struct dmar_drhd_unit *drhd;
975 struct intel_iommu *iommu = NULL;
976
977 /*
978 * Disable Interrupt-remapping for all the DRHD's now.
979 */
980 for_each_iommu(iommu, drhd) {
981 if (!ecap_ir_support(iommu->ecap))
982 continue;
983
Suresh Siddha95a02e92012-03-30 11:47:07 -0700984 iommu_disable_irq_remapping(iommu);
Fenghua Yub24696b2009-03-27 14:22:44 -0700985 }
986}
987
Suresh Siddha95a02e92012-03-30 11:47:07 -0700988static int reenable_irq_remapping(int eim)
Fenghua Yub24696b2009-03-27 14:22:44 -0700989{
990 struct dmar_drhd_unit *drhd;
Quentin Lambert2f119c72015-02-06 10:59:53 +0100991 bool setup = false;
Fenghua Yub24696b2009-03-27 14:22:44 -0700992 struct intel_iommu *iommu = NULL;
993
994 for_each_iommu(iommu, drhd)
995 if (iommu->qi)
996 dmar_reenable_qi(iommu);
997
998 /*
999 * Setup Interrupt-remapping for all the DRHD's now.
1000 */
1001 for_each_iommu(iommu, drhd) {
1002 if (!ecap_ir_support(iommu->ecap))
1003 continue;
1004
1005 /* Set up interrupt remapping for iommu.*/
Suresh Siddha95a02e92012-03-30 11:47:07 -07001006 iommu_set_irq_remapping(iommu, eim);
Joerg Roedeld4d1c0f2015-06-12 14:35:54 +02001007 iommu_enable_irq_remapping(iommu);
Quentin Lambert2f119c72015-02-06 10:59:53 +01001008 setup = true;
Fenghua Yub24696b2009-03-27 14:22:44 -07001009 }
1010
1011 if (!setup)
1012 goto error;
1013
1014 return 0;
1015
1016error:
1017 /*
1018 * handle error condition gracefully here!
1019 */
1020 return -1;
1021}
1022
Joerg Roedel0c3f1732012-03-30 11:47:02 -07001023static void prepare_irte(struct irte *irte, int vector,
1024 unsigned int dest)
1025{
1026 memset(irte, 0, sizeof(*irte));
1027
1028 irte->present = 1;
1029 irte->dst_mode = apic->irq_dest_mode;
1030 /*
1031 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
1032 * actual level or edge trigger will be set up in the IO-APIC
1033 * RTE. This will help simplify level-triggered irq migration.
1034 * For more details, see the comments (in io_apic.c) explaining IO-APIC
1035 * irq migration in the presence of interrupt-remapping.
1036 */
1037 irte->trigger_mode = 0;
1038 irte->dlvry_mode = apic->irq_delivery_mode;
1039 irte->vector = vector;
1040 irte->dest_id = IRTE_DEST(dest);
1041 irte->redir_hint = 1;
1042}
1043
1044static int intel_setup_ioapic_entry(int irq,
1045 struct IO_APIC_route_entry *route_entry,
1046 unsigned int destination, int vector,
1047 struct io_apic_irq_attr *attr)
1048{
1049 int ioapic_id = mpc_ioapic_id(attr->ioapic);
Jiang Liu3a5670e2014-02-19 14:07:33 +08001050 struct intel_iommu *iommu;
Joerg Roedel0c3f1732012-03-30 11:47:02 -07001051 struct IR_IO_APIC_route_entry *entry;
1052 struct irte irte;
1053 int index;
1054
Jiang Liu3a5670e2014-02-19 14:07:33 +08001055 down_read(&dmar_global_lock);
1056 iommu = map_ioapic_to_ir(ioapic_id);
Joerg Roedel0c3f1732012-03-30 11:47:02 -07001057 if (!iommu) {
1058 pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
Jiang Liu3a5670e2014-02-19 14:07:33 +08001059 index = -ENODEV;
1060 } else {
1061 index = alloc_irte(iommu, irq, 1);
1062 if (index < 0) {
1063 pr_warn("Failed to allocate IRTE for ioapic %d\n",
1064 ioapic_id);
1065 index = -ENOMEM;
1066 }
Joerg Roedel0c3f1732012-03-30 11:47:02 -07001067 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08001068 up_read(&dmar_global_lock);
1069 if (index < 0)
1070 return index;
Joerg Roedel0c3f1732012-03-30 11:47:02 -07001071
1072 prepare_irte(&irte, vector, destination);
1073
1074 /* Set source-id of interrupt request */
1075 set_ioapic_sid(&irte, ioapic_id);
1076
1077 modify_irte(irq, &irte);
1078
1079 apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
1080 "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
1081 "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
1082 "Avail:%X Vector:%02X Dest:%08X "
1083 "SID:%04X SQ:%X SVT:%X)\n",
1084 attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
1085 irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
1086 irte.avail, irte.vector, irte.dest_id,
1087 irte.sid, irte.sq, irte.svt);
1088
Jiang Liu3a5670e2014-02-19 14:07:33 +08001089 entry = (struct IR_IO_APIC_route_entry *)route_entry;
Joerg Roedel0c3f1732012-03-30 11:47:02 -07001090 memset(entry, 0, sizeof(*entry));
1091
1092 entry->index2 = (index >> 15) & 0x1;
1093 entry->zero = 0;
1094 entry->format = 1;
1095 entry->index = (index & 0x7fff);
1096 /*
1097 * IO-APIC RTE will be configured with virtual vector.
1098 * irq handler will do the explicit EOI to the io-apic.
1099 */
1100 entry->vector = attr->ioapic_pin;
1101 entry->mask = 0; /* enable IRQ */
1102 entry->trigger = attr->trigger;
1103 entry->polarity = attr->polarity;
1104
1105 /* Mask level triggered irqs.
1106 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1107 */
1108 if (attr->trigger)
1109 entry->mask = 1;
1110
1111 return 0;
1112}
1113
Joerg Roedel4c1bad62012-03-30 11:47:03 -07001114/*
1115 * Migrate the IO-APIC irq in the presence of intr-remapping.
1116 *
1117 * For both level and edge triggered, irq migration is a simple atomic
1118 * update(of vector and cpu destination) of IRTE and flush the hardware cache.
1119 *
1120 * For level triggered, we eliminate the io-apic RTE modification (with the
1121 * updated vector information), by using a virtual vector (io-apic pin number).
1122 * Real vector that is used for interrupting cpu will be coming from
1123 * the interrupt-remapping table entry.
1124 *
1125 * As the migration is a simple atomic update of IRTE, the same mechanism
1126 * is used to migrate MSI irq's in the presence of interrupt-remapping.
1127 */
1128static int
1129intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
1130 bool force)
1131{
Jiang Liu91411da2014-10-27 16:12:09 +08001132 struct irq_cfg *cfg = irqd_cfg(data);
Joerg Roedel4c1bad62012-03-30 11:47:03 -07001133 unsigned int dest, irq = data->irq;
1134 struct irte irte;
Alexander Gordeevff164322012-06-07 15:15:59 +02001135 int err;
Joerg Roedel4c1bad62012-03-30 11:47:03 -07001136
Suresh Siddha7eb9ae02012-06-14 18:28:49 -07001137 if (!config_enabled(CONFIG_SMP))
1138 return -EINVAL;
1139
Joerg Roedel4c1bad62012-03-30 11:47:03 -07001140 if (!cpumask_intersects(mask, cpu_online_mask))
1141 return -EINVAL;
1142
1143 if (get_irte(irq, &irte))
1144 return -EBUSY;
1145
Alexander Gordeevff164322012-06-07 15:15:59 +02001146 err = assign_irq_vector(irq, cfg, mask);
1147 if (err)
1148 return err;
Joerg Roedel4c1bad62012-03-30 11:47:03 -07001149
Alexander Gordeevff164322012-06-07 15:15:59 +02001150 err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
1151 if (err) {
Dan Carpentered88bed2012-06-12 19:26:33 +03001152 if (assign_irq_vector(irq, cfg, data->affinity))
Alexander Gordeevff164322012-06-07 15:15:59 +02001153 pr_err("Failed to recover vector for irq %d\n", irq);
1154 return err;
1155 }
Joerg Roedel4c1bad62012-03-30 11:47:03 -07001156
1157 irte.vector = cfg->vector;
1158 irte.dest_id = IRTE_DEST(dest);
1159
1160 /*
1161 * Atomically updates the IRTE with the new destination, vector
1162 * and flushes the interrupt entry cache.
1163 */
1164 modify_irte(irq, &irte);
1165
1166 /*
1167 * After this point, all the interrupts will start arriving
1168 * at the new destination. So, time to cleanup the previous
1169 * vector allocation.
1170 */
1171 if (cfg->move_in_progress)
1172 send_cleanup_vector(cfg);
1173
1174 cpumask_copy(data->affinity, mask);
1175 return 0;
1176}
Joerg Roedel0c3f1732012-03-30 11:47:02 -07001177
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001178static void intel_compose_msi_msg(struct pci_dev *pdev,
1179 unsigned int irq, unsigned int dest,
1180 struct msi_msg *msg, u8 hpet_id)
1181{
1182 struct irq_cfg *cfg;
1183 struct irte irte;
Suresh Siddhac558df42012-05-08 00:08:54 -07001184 u16 sub_handle = 0;
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001185 int ir_index;
1186
Jiang Liu91411da2014-10-27 16:12:09 +08001187 cfg = irq_cfg(irq);
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001188
1189 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
1190 BUG_ON(ir_index == -1);
1191
1192 prepare_irte(&irte, cfg->vector, dest);
1193
1194 /* Set source-id of interrupt request */
1195 if (pdev)
1196 set_msi_sid(&irte, pdev);
1197 else
1198 set_hpet_sid(&irte, hpet_id);
1199
1200 modify_irte(irq, &irte);
1201
1202 msg->address_hi = MSI_ADDR_BASE_HI;
1203 msg->data = sub_handle;
1204 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
1205 MSI_ADDR_IR_SHV |
1206 MSI_ADDR_IR_INDEX1(ir_index) |
1207 MSI_ADDR_IR_INDEX2(ir_index);
1208}
1209
1210/*
1211 * Map the PCI dev to the corresponding remapping hardware unit
1212 * and allocate 'nvec' consecutive interrupt-remapping table entries
1213 * in it.
1214 */
1215static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
1216{
1217 struct intel_iommu *iommu;
1218 int index;
1219
Jiang Liu3a5670e2014-02-19 14:07:33 +08001220 down_read(&dmar_global_lock);
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001221 iommu = map_dev_to_ir(dev);
1222 if (!iommu) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001223 pr_err("Unable to map PCI %s to iommu\n", pci_name(dev));
Jiang Liu3a5670e2014-02-19 14:07:33 +08001224 index = -ENOENT;
1225 } else {
1226 index = alloc_irte(iommu, irq, nvec);
1227 if (index < 0) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001228 pr_err("Unable to allocate %d IRTE for PCI %s\n",
Jiang Liu3a5670e2014-02-19 14:07:33 +08001229 nvec, pci_name(dev));
1230 index = -ENOSPC;
1231 }
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001232 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08001233 up_read(&dmar_global_lock);
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001234
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001235 return index;
1236}
1237
1238static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
1239 int index, int sub_handle)
1240{
1241 struct intel_iommu *iommu;
Jiang Liu3a5670e2014-02-19 14:07:33 +08001242 int ret = -ENOENT;
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001243
Jiang Liu3a5670e2014-02-19 14:07:33 +08001244 down_read(&dmar_global_lock);
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001245 iommu = map_dev_to_ir(pdev);
Jiang Liu3a5670e2014-02-19 14:07:33 +08001246 if (iommu) {
1247 /*
1248 * set up the mapping between the irq and the IRTE
1249 * base index, with the sub_handle pointing to the
1250 * appropriate interrupt remap table entry.
1251 */
1252 set_irte_irq(irq, iommu, index, sub_handle);
1253 ret = 0;
1254 }
1255 up_read(&dmar_global_lock);
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001256
Jiang Liu3a5670e2014-02-19 14:07:33 +08001257 return ret;
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001258}
1259
Yijing Wang5fc24d82014-09-17 17:32:19 +08001260static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001261{
Jiang Liu3a5670e2014-02-19 14:07:33 +08001262 int ret = -1;
1263 struct intel_iommu *iommu;
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001264 int index;
1265
Jiang Liu3a5670e2014-02-19 14:07:33 +08001266 down_read(&dmar_global_lock);
1267 iommu = map_hpet_to_ir(id);
1268 if (iommu) {
1269 index = alloc_irte(iommu, irq, 1);
1270 if (index >= 0)
1271 ret = 0;
1272 }
1273 up_read(&dmar_global_lock);
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001274
Jiang Liu3a5670e2014-02-19 14:07:33 +08001275 return ret;
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001276}
1277
Joerg Roedel736baef2012-03-30 11:47:00 -07001278struct irq_remap_ops intel_irq_remap_ops = {
Thomas Gleixner11190302015-01-07 15:31:29 +08001279 .prepare = intel_prepare_irq_remapping,
Suresh Siddha95a02e92012-03-30 11:47:07 -07001280 .enable = intel_enable_irq_remapping,
1281 .disable = disable_irq_remapping,
1282 .reenable = reenable_irq_remapping,
Joerg Roedel4f3d8b62012-03-30 11:47:01 -07001283 .enable_faulting = enable_drhd_fault_handling,
Joerg Roedel0c3f1732012-03-30 11:47:02 -07001284 .setup_ioapic_entry = intel_setup_ioapic_entry,
Joerg Roedel4c1bad62012-03-30 11:47:03 -07001285 .set_affinity = intel_ioapic_set_affinity,
Joerg Roedel9d619f62012-03-30 11:47:04 -07001286 .free_irq = free_irte,
Joerg Roedel5e2b9302012-03-30 11:47:05 -07001287 .compose_msi_msg = intel_compose_msi_msg,
1288 .msi_alloc_irq = intel_msi_alloc_irq,
1289 .msi_setup_irq = intel_msi_setup_irq,
Yijing Wang5fc24d82014-09-17 17:32:19 +08001290 .alloc_hpet_msi = intel_alloc_hpet_msi,
Joerg Roedel736baef2012-03-30 11:47:00 -07001291};
Jiang Liu6b197242014-11-09 22:47:58 +08001292
Jiang Liua7a3dad2014-11-09 22:48:00 +08001293/*
1294 * Support of Interrupt Remapping Unit Hotplug
1295 */
1296static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
1297{
1298 int ret;
1299 int eim = x2apic_enabled();
1300
1301 if (eim && !ecap_eim_support(iommu->ecap)) {
1302 pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
1303 iommu->reg_phys, iommu->ecap);
1304 return -ENODEV;
1305 }
1306
1307 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
1308 pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
1309 iommu->reg_phys);
1310 return -ENODEV;
1311 }
1312
1313 /* TODO: check all IOAPICs are covered by IOMMU */
1314
1315 /* Setup Interrupt-remapping now. */
1316 ret = intel_setup_irq_remapping(iommu);
1317 if (ret) {
Joerg Roedel9e4e49d2015-06-12 14:23:56 +02001318 pr_err("Failed to setup irq remapping for %s\n",
1319 iommu->name);
Jiang Liua7a3dad2014-11-09 22:48:00 +08001320 intel_teardown_irq_remapping(iommu);
1321 ir_remove_ioapic_hpet_scope(iommu);
Joerg Roedel9e4e49d2015-06-12 14:23:56 +02001322 } else {
Joerg Roedeld4d1c0f2015-06-12 14:35:54 +02001323 iommu_enable_irq_remapping(iommu);
Jiang Liua7a3dad2014-11-09 22:48:00 +08001324 }
1325
1326 return ret;
1327}
1328
Jiang Liu6b197242014-11-09 22:47:58 +08001329int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
1330{
Jiang Liua7a3dad2014-11-09 22:48:00 +08001331 int ret = 0;
1332 struct intel_iommu *iommu = dmaru->iommu;
1333
1334 if (!irq_remapping_enabled)
1335 return 0;
1336 if (iommu == NULL)
1337 return -EINVAL;
1338 if (!ecap_ir_support(iommu->ecap))
1339 return 0;
1340
1341 if (insert) {
1342 if (!iommu->ir_table)
1343 ret = dmar_ir_add(dmaru, iommu);
1344 } else {
1345 if (iommu->ir_table) {
1346 if (!bitmap_empty(iommu->ir_table->bitmap,
1347 INTR_REMAP_TABLE_ENTRIES)) {
1348 ret = -EBUSY;
1349 } else {
1350 iommu_disable_irq_remapping(iommu);
1351 intel_teardown_irq_remapping(iommu);
1352 ir_remove_ioapic_hpet_scope(iommu);
1353 }
1354 }
1355 }
1356
1357 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08001358}