#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

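/*
 * Per-IRQ bookkeeping for interrupt remapping: which IOMMU owns this
 * IRQ's interrupt remap table entry (IRTE), the base index of that
 * entry, the sub-handle within a multi-entry allocation, and the mask
 * (log2 of the number of entries allocated as one block).
 */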
struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
};

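/*
 * With CONFIG_SPARSE_IRQ the irq_2_iommu state hangs off the irq_desc
 * and is allocated on demand, node-local to the requesting CPU; the
 * !CONFIG_SPARSE_IRQ fallback below uses a static NR_IRQS-sized array.
 */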
#ifdef CONFIG_SPARSE_IRQ
static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
{
	struct irq_2_iommu *iommu;
	int node;

	node = cpu_to_node(cpu);

	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
	printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);

	return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	if (WARN_ON_ONCE(!desc))
		return NULL;

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	/*
	 * alloc irq desc if not allocated already.
	 */
	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc) {
		printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
		return NULL;
	}

	irq_iommu = desc->irq_2_iommu;

	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
}

#else /* !CONFIG_SPARSE_IRQ */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

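/*
 * Return the irq_2_iommu mapping for @irq, or NULL if no mapping
 * exists or no IOMMU has been bound to it yet.
 */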
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

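/* Copy the IRTE that @irq is currently programmed with into @entry. */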
int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;

	if (!entry)
		return -1;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

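/*
 * Allocate @count contiguous IRTEs for @irq in @iommu's remap table
 * and return the base index, or -1 on failure.  @count is rounded up
 * to a power of two; since the search below steps in multiples of
 * @count from index 0, the block ends up naturally aligned, so a
 * single (index, mask) pair can describe the whole allocation.
 */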
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock(&irq_2_ir_lock);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock(&irq_2_ir_lock);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock(&irq_2_ir_lock);

	return index;
}

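/*
 * Queue a selective interrupt-entry-cache invalidation for the IRTE
 * block at @index (2^@mask entries) and wait for it to complete.
 */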
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

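/* Return @irq's base IRTE index and report its sub-handle via @sub_handle. */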
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock(&irq_2_ir_lock);
	return index;
}

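/*
 * Bind @irq to an already-allocated IRTE block at @index, at offset
 * @subhandle within the block.  This lets several IRQs (presumably the
 * vectors of a multi-MSI device) share one alloc_irte() allocation.
 */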
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);

	irq_iommu = irq_2_iommu_alloc(irq);

	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

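/* Drop @irq's binding to its IRTE without freeing the table entries. */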
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

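/*
 * Rewrite @irq's IRTE with @irte_modified, flush the entry to memory,
 * and invalidate the stale copy in the interrupt entry cache.
 */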
int modify_irte(int irq, struct irte *irte_modified)
{
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	qi_flush_iec(iommu, index, 0);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

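/* Invalidate any cached copies of @irq's IRTEs in the interrupt entry cache. */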
int flush_irte(int irq)
{
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock(&irq_2_ir_lock);

	return 0;
}

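/* Find the interrupt-remapping hardware unit that serves IO-APIC @apic. */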
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

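/*
 * Release @irq's IRTE block: when this is the base mapping
 * (sub_handle 0), clear and flush the hardware entries as well, then
 * drop the software state.
 */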
int free_irte(int irq)
{
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_iommu->sub_handle) {
		/* clear each entry of the block, not the first one repeatedly */
		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
			set_64bit((unsigned long *)(irte + i), 0);
		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

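/*
 * Program @iommu with the remap table and turn remapping on: point
 * DMAR_IRTA_REG at the table, latch it with the SIRTP command,
 * globally invalidate the interrupt entry cache, then set IRE.
 */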
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_KERNEL);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

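/*
 * Enable interrupt remapping on every capable DRHD unit: verify EIM
 * support when x2APIC mode (@eim) is requested, enable queued
 * invalidation everywhere (qi_flush_iec() depends on it), then set up
 * and switch on remapping per unit.
 */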
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

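/*
 * Record, for each IO-APIC listed in a DRHD's device scope, which
 * IOMMU remaps its interrupts.
 */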
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}