#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include "intel-iommu.h"
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
};

#ifdef CONFIG_HAVE_SPARSE_IRQ
static struct irq_2_iommu *irq_2_iommuX;
/* fill one page ? */
static int nr_irq_2_iommu = 0x100;
static int irq_2_iommu_index;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irq_2_iommu, PAGE_SIZE, NULL);

extern void *__alloc_bootmem_nopanic(unsigned long size,
                                     unsigned long align,
                                     unsigned long goal);

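/*
 * Hand out the next free entry from the pre-allocated irq_2_iommu pool;
 * when the pool is exhausted, allocate another batch (kzalloc once the
 * bootmem allocator has been torn down, bootmem otherwise).
 */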
static struct irq_2_iommu *get_one_free_irq_2_iommu(int not_used)
{
        struct irq_2_iommu *iommu;
        unsigned long total_bytes;

        if (irq_2_iommu_index >= nr_irq_2_iommu) {
                /*
                 * we ran out of pre-allocated ones, allocate more
                 */
                printk(KERN_DEBUG "try to get more irq_2_iommu %d\n", nr_irq_2_iommu);

                total_bytes = sizeof(struct irq_2_iommu)*nr_irq_2_iommu;

                if (after_bootmem)
                        iommu = kzalloc(total_bytes, GFP_ATOMIC);
                else
                        iommu = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);

                if (!iommu)
                        panic("can not get more irq_2_iommu\n");

                irq_2_iommuX = iommu;
                irq_2_iommu_index = 0;
        }

        iommu = &irq_2_iommuX[irq_2_iommu_index];
        irq_2_iommu_index++;
        return iommu;
}

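/* Return the irq_2_iommu entry attached to the irq_desc of @irq. */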
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        BUG_ON(!desc);

        return desc->irq_2_iommu;
}

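/*
 * Like irq_2_iommu(), but attach a fresh entry to the irq_desc if the
 * IRQ does not have one yet.
 */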
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        desc = irq_to_desc(irq);

        BUG_ON(!desc);

        irq_iommu = desc->irq_2_iommu;

        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(irq);

        return desc->irq_2_iommu;
}

#else /* !CONFIG_HAVE_SPARSE_IRQ */

#ifdef CONFIG_HAVE_DYN_ARRAY
static struct irq_2_iommu *irq_2_iommuX;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
#else
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
#endif

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

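/*
 * Return the irq_2_iommu entry for @irq only if an IOMMU is already
 * bound to it, i.e. the IRQ is actually remapped; NULL otherwise.
 */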
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

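/* Copy the IRTE currently backing @irq into @entry. */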
int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        if (!entry)
                return -1;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}

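/*
 * Allocate @count consecutive IRTEs for @irq (rounded up to a power of
 * two when more than one is requested), mark them present and record
 * the base index and mask in the per-IRQ irq_2_iommu data.  Returns the
 * base index, or -1 on failure.
 */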
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_HAVE_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock(&irq_2_ir_lock);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock(&irq_2_ir_lock);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock(&irq_2_ir_lock);

        return index;
}

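/*
 * Submit a selective Interrupt Entry Cache invalidation for the given
 * index/mask and wait for it to complete.
 */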
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

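/*
 * Return the base IRTE index of @irq and report its sub-handle through
 * @sub_handle; -1 if the IRQ is not remapped.
 */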
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock(&irq_2_ir_lock);
        return index;
}

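/*
 * Point @irq at an IRTE that was allocated elsewhere: record the IOMMU,
 * base index and sub-handle in its irq_2_iommu data.
 */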
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

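/*
 * Drop the IRTE binding recorded for @irq without touching the table
 * itself.
 */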
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

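/*
 * Rewrite the low 64 bits of the IRTE backing @irq from @irte_modified,
 * flush the cache line and invalidate the interrupt entry cache for
 * that index.
 */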
int modify_irte(int irq, struct irte *irte_modified)
{
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        qi_flush_iec(iommu, index, 0);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}

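/* Invalidate the interrupt entry cache for all IRTEs backing @irq. */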
int flush_irte(int irq)
{
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock(&irq_2_ir_lock);

        return 0;
}

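/*
 * Find the interrupt-remapping hardware unit that the IO-APIC with the
 * given id sits under, as recorded from the DMAR device scopes.
 */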
struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

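/* Find the DRHD unit (IOMMU) whose scope covers @dev. */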
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

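/*
 * Release the IRTEs allocated for @irq: the owner of the allocation
 * (sub_handle == 0) clears the table entries and invalidates the
 * interrupt entry cache; in all cases the per-IRQ bookkeeping is reset.
 */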
int free_irte(int irq)
{
        int index, i;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        if (!irq_iommu->sub_handle) {
                for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                        set_64bit((unsigned long *)(irte + i), 0);
                qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

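/*
 * Program the interrupt-remapping table address into the hardware,
 * globally invalidate the interrupt entry cache, and then enable
 * interrupt-remapping on this IOMMU.
 */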
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 cmd, sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        cmd = iommu->gcmd | DMA_GCMD_SIRTP;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        cmd = iommu->gcmd | DMA_GCMD_IRE;
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

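/*
 * Allocate the interrupt-remapping table for @iommu and switch the
 * hardware over to it.
 */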
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_KERNEL);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

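/*
 * Enable interrupt-remapping on every DRHD unit that supports it:
 * verify EIM support when x2apic mode (eim) is requested, enable queued
 * invalidation on all units, then set up a remapping table per unit.
 */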
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
                               "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued "
                               "invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

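/*
 * Walk the device scope entries of a DRHD and record which IO-APICs
 * are enumerated under this interrupt-remapping hardware unit.
 */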
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APIC's listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}