#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include "intel-iommu.h"
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

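/*
 * Per-IRQ bookkeeping: the IOMMU that owns the remap entry for each
 * IRQ, the base index of its IRTE block, the sub-handle within that
 * block (for IRQs sharing one allocation) and the block's order mask.
 */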
static struct {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
} irq_2_iommu[NR_IRQS];

static DEFINE_SPINLOCK(irq_2_ir_lock);

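/* Returns 1 if @irq is backed by an interrupt-remapping table entry. */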
int irq_remapped(int irq)
{
	if (irq >= NR_IRQS)
		return 0;

	if (!irq_2_iommu[irq].iommu)
		return 0;

	return 1;
}

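/*
 * Copy the IRTE backing @irq into @entry. Returns 0 on success, -1 if
 * @irq is out of range or not remapped.
 */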
int get_irte(int irq, struct irte *entry)
{
	int index;

	if (!entry || irq >= NR_IRQS)
		return -1;

	spin_lock(&irq_2_ir_lock);
	if (!irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
	*entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

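/*
 * Allocate a block of @count consecutive IRTEs for @irq. @count is
 * rounded up to a power of two and its order kept in irte_mask, so the
 * whole block can later be invalidated in one shot; e.g. a request for
 * 4 entries yields irte_mask = 2. Returns the base index or -1.
 */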
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	u16 index, start_index;
	unsigned int mask = 0;
	int i;

	if (!count)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock(&irq_2_ir_lock);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock(&irq_2_ir_lock);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_2_iommu[irq].iommu = iommu;
	irq_2_iommu[irq].irte_index = index;
	irq_2_iommu[irq].sub_handle = 0;
	irq_2_iommu[irq].irte_mask = mask;

	spin_unlock(&irq_2_ir_lock);

	return index;
}

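/*
 * Queue a selective interrupt entry cache invalidation covering the
 * 2^mask entries at @index and wait for the hardware to complete it.
 */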
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

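/* Look up the IRTE base index and sub-handle recorded for @irq. */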
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;

	spin_lock(&irq_2_ir_lock);
	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	*sub_handle = irq_2_iommu[irq].sub_handle;
	index = irq_2_iommu[irq].irte_index;
	spin_unlock(&irq_2_ir_lock);
	return index;
}

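/*
 * Bind @irq to an already-allocated IRTE block: record @iommu, the
 * base @index and @subhandle. Fails if @irq is already bound.
 */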
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	spin_lock(&irq_2_ir_lock);
	if (irq >= NR_IRQS || irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	irq_2_iommu[irq].iommu = iommu;
	irq_2_iommu[irq].irte_index = index;
	irq_2_iommu[irq].sub_handle = subhandle;
	irq_2_iommu[irq].irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

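/* Drop the irq -> IRTE binding without touching the table itself. */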
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	spin_lock(&irq_2_ir_lock);
	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	irq_2_iommu[irq].iommu = NULL;
	irq_2_iommu[irq].irte_index = 0;
	irq_2_iommu[irq].sub_handle = 0;
	irq_2_iommu[irq].irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

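/*
 * Rewrite the low 64 bits of the IRTE backing @irq from @irte_modified,
 * flush the cache line and invalidate the interrupt entry cache.
 */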
int modify_irte(int irq, struct irte *irte_modified)
{
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;

	spin_lock(&irq_2_ir_lock);
	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_2_iommu[irq].iommu;

	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	qi_flush_iec(iommu, index, 0);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

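/* Invalidate the interrupt entry cache for the IRTE block of @irq. */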
int flush_irte(int irq)
{
	int index;
	struct intel_iommu *iommu;

	spin_lock(&irq_2_ir_lock);
	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_2_iommu[irq].iommu;

	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;

	qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
	spin_unlock(&irq_2_ir_lock);

	return 0;
}

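/*
 * Release the IRTE block owned by @irq: the block owner (sub_handle 0)
 * clears every entry and invalidates the cache; sub-handle users just
 * drop their bookkeeping.
 */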
int free_irte(int irq)
{
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;

	spin_lock(&irq_2_ir_lock);
	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_2_iommu[irq].iommu;

	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_2_iommu[irq].sub_handle) {
		/* clear every entry in the block, not just the first one */
		for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
			set_64bit((unsigned long *)(irte + i), 0);
		qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
	}

	irq_2_iommu[irq].iommu = NULL;
	irq_2_iommu[irq].irte_index = 0;
	irq_2_iommu[irq].sub_handle = 0;
	irq_2_iommu[irq].irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

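/*
 * Program the remap table address into the hardware, globally
 * invalidate the interrupt entry cache, then enable
 * interrupt-remapping through the global command register.
 */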
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

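/*
 * Allocate the ir_table descriptor and the page-aligned table itself,
 * then hand both to the hardware.
 */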
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_KERNEL);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

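/*
 * Enable interrupt-remapping on every DRHD that supports it: check EIM
 * support first when extended (x2apic) mode is requested, enable
 * queued invalidation everywhere (qi_flush_iec() depends on it), then
 * set up each unit's remapping table.
 */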
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	/*
	 * check for interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHDs.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Set up interrupt-remapping for all the DRHDs now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

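/*
 * Walk the device scope entries of a DRHD and record the IO-APICs that
 * fall under @iommu in the ir_ioapic[] table.
 */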
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IO-APICs and their interrupt-remapping
 * hardware unit.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}