Joerg Roedel | 9f10e5b | 2015-06-12 09:57:06 +0200 | [diff] [blame] | 1 | |
| 2 | #define pr_fmt(fmt) "DMAR-IR: " fmt |
| 3 | |
Yinghai Lu | 5aeecaf | 2008-08-19 20:49:59 -0700 | [diff] [blame] | 4 | #include <linux/interrupt.h> |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 5 | #include <linux/dmar.h> |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 6 | #include <linux/spinlock.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 7 | #include <linux/slab.h> |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 8 | #include <linux/jiffies.h> |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 9 | #include <linux/hpet.h> |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 10 | #include <linux/pci.h> |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 11 | #include <linux/irq.h> |
Lv Zheng | 8b48463 | 2013-12-03 08:49:16 +0800 | [diff] [blame] | 12 | #include <linux/intel-iommu.h> |
| 13 | #include <linux/acpi.h> |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 14 | #include <asm/io_apic.h> |
Yinghai Lu | 17483a1 | 2008-12-12 13:14:18 -0800 | [diff] [blame] | 15 | #include <asm/smp.h> |
Jaswinder Singh Rajput | 6d652ea | 2009-01-07 21:38:59 +0530 | [diff] [blame] | 16 | #include <asm/cpu.h> |
Suresh Siddha | 8a8f422 | 2012-03-30 11:47:08 -0700 | [diff] [blame] | 17 | #include <asm/irq_remapping.h> |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 18 | #include <asm/pci-direct.h> |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 19 | #include <asm/msidef.h> |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 20 | |
Suresh Siddha | 8a8f422 | 2012-03-30 11:47:08 -0700 | [diff] [blame] | 21 | #include "irq_remapping.h" |
Joerg Roedel | 736baef | 2012-03-30 11:47:00 -0700 | [diff] [blame] | 22 | |
Joerg Roedel | eef93fd | 2012-03-30 11:46:59 -0700 | [diff] [blame] | 23 | struct ioapic_scope { |
| 24 | struct intel_iommu *iommu; |
| 25 | unsigned int id; |
| 26 | unsigned int bus; /* PCI bus number */ |
| 27 | unsigned int devfn; /* PCI devfn number */ |
| 28 | }; |
| 29 | |
| 30 | struct hpet_scope { |
| 31 | struct intel_iommu *iommu; |
| 32 | u8 id; |
| 33 | unsigned int bus; |
| 34 | unsigned int devfn; |
| 35 | }; |
| 36 | |
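| |	/*
| |	 * IRTE destination format differs between xAPIC and x2APIC (EIM) mode:
| |	 * with EIM the full 32-bit APIC ID is programmed, otherwise the 8-bit
| |	 * APIC ID is placed in bits 15:8 of the destination field.
| |	 */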
| 37 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) |
Jiang Liu | 13d09b6 | 2015-01-07 15:31:37 +0800 | [diff] [blame] | 38 | #define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8) |
Joerg Roedel | eef93fd | 2012-03-30 11:46:59 -0700 | [diff] [blame] | 39 | |
Jiang Liu | 13d09b6 | 2015-01-07 15:31:37 +0800 | [diff] [blame] | 40 | static int __read_mostly eim_mode; |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 41 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 42 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; |
Chris Wright | d1423d5 | 2010-07-20 11:06:49 -0700 | [diff] [blame] | 43 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 44 | /* |
| 45 | * Lock ordering: |
| 46 | * ->dmar_global_lock |
| 47 | * ->irq_2_ir_lock |
| 48 | * ->qi->q_lock |
| 49 | * ->iommu->register_lock |
| 50 | * Note: |
| 51 | * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called |
|  52 |  * in a single-threaded environment with interrupts disabled, so there is no
|  53 |  * need to take the dmar_global_lock.
| 54 | */ |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 55 | static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 56 | |
Jiang Liu | 694835d | 2014-01-06 14:18:16 +0800 | [diff] [blame] | 57 | static int __init parse_ioapics_under_ir(void); |
| 58 | |
Yinghai Lu | e420dfb | 2008-08-19 20:50:21 -0700 | [diff] [blame] | 59 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
| 60 | { |
Jiang Liu | 91411da | 2014-10-27 16:12:09 +0800 | [diff] [blame] | 61 | struct irq_cfg *cfg = irq_cfg(irq); |
Thomas Gleixner | 349d676 | 2010-10-10 12:29:27 +0200 | [diff] [blame] | 62 | return cfg ? &cfg->irq_2_iommu : NULL; |
Yinghai Lu | 0b8f1ef | 2008-12-05 18:58:31 -0800 | [diff] [blame] | 63 | } |
| 64 | |
Rashika Kheria | 6a7885c | 2013-12-18 12:04:27 +0530 | [diff] [blame] | 65 | static int get_irte(int irq, struct irte *entry) |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 66 | { |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 67 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
Suresh Siddha | 4c5502b | 2009-03-16 17:04:53 -0700 | [diff] [blame] | 68 | unsigned long flags; |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 69 | int index; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 70 | |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 71 | if (!entry || !irq_iommu) |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 72 | return -1; |
| 73 | |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 74 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 75 | |
Greg Edwards | af43746 | 2014-07-23 10:13:26 -0600 | [diff] [blame] | 76 | if (unlikely(!irq_iommu->iommu)) { |
| 77 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
| 78 | return -1; |
| 79 | } |
| 80 | |
Yinghai Lu | e420dfb | 2008-08-19 20:50:21 -0700 | [diff] [blame] | 81 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
| 82 | *entry = *(irq_iommu->iommu->ir_table->base + index); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 83 | |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 84 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 85 | return 0; |
| 86 | } |
| 87 | |
Joerg Roedel | 263b5e8 | 2012-03-30 11:47:06 -0700 | [diff] [blame] | 88 | static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 89 | { |
| 90 | struct ir_table *table = iommu->ir_table; |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 91 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
Jiang Liu | 91411da | 2014-10-27 16:12:09 +0800 | [diff] [blame] | 92 | struct irq_cfg *cfg = irq_cfg(irq); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 93 | unsigned int mask = 0; |
Suresh Siddha | 4c5502b | 2009-03-16 17:04:53 -0700 | [diff] [blame] | 94 | unsigned long flags; |
Dan Carpenter | 9f4c744 | 2014-01-09 08:32:36 +0300 | [diff] [blame] | 95 | int index; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 96 | |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 97 | if (!count || !irq_iommu) |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 98 | return -1; |
| 99 | |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 100 | if (count > 1) { |
| 101 | count = __roundup_pow_of_two(count); |
| 102 | mask = ilog2(count); |
| 103 | } |
| 104 | |
| 105 | if (mask > ecap_max_handle_mask(iommu->ecap)) { |
Joerg Roedel | 9f10e5b | 2015-06-12 09:57:06 +0200 | [diff] [blame] | 106 | pr_err("Requested mask %x exceeds the max invalidation handle" |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 107 | " mask value %Lx\n", mask, |
| 108 | ecap_max_handle_mask(iommu->ecap)); |
| 109 | return -1; |
| 110 | } |
| 111 | |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 112 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
Jiang Liu | 360eb3c5 | 2014-01-06 14:18:08 +0800 | [diff] [blame] | 113 | index = bitmap_find_free_region(table->bitmap, |
| 114 | INTR_REMAP_TABLE_ENTRIES, mask); |
| 115 | if (index < 0) { |
| 116 | pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id); |
| 117 | } else { |
| 118 | cfg->remapped = 1; |
| 119 | irq_iommu->iommu = iommu; |
| 120 | irq_iommu->irte_index = index; |
| 121 | irq_iommu->sub_handle = 0; |
| 122 | irq_iommu->irte_mask = mask; |
| 123 | } |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 124 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 125 | |
| 126 | return index; |
| 127 | } |
| 128 | |
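| |	/*
| |	 * Queue a selective Interrupt Entry Cache invalidation covering the
| |	 * 2^mask IRTEs starting at @index and wait for it to complete.
| |	 */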
Yu Zhao | 704126a | 2009-01-04 16:28:52 +0800 | [diff] [blame] | 129 | static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 130 | { |
| 131 | struct qi_desc desc; |
| 132 | |
| 133 | desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask) |
| 134 | | QI_IEC_SELECTIVE; |
| 135 | desc.high = 0; |
| 136 | |
Yu Zhao | 704126a | 2009-01-04 16:28:52 +0800 | [diff] [blame] | 137 | return qi_submit_sync(&desc, iommu); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 138 | } |
| 139 | |
Joerg Roedel | 263b5e8 | 2012-03-30 11:47:06 -0700 | [diff] [blame] | 140 | static int map_irq_to_irte_handle(int irq, u16 *sub_handle) |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 141 | { |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 142 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
Suresh Siddha | 4c5502b | 2009-03-16 17:04:53 -0700 | [diff] [blame] | 143 | unsigned long flags; |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 144 | int index; |
| 145 | |
| 146 | if (!irq_iommu) |
| 147 | return -1; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 148 | |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 149 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
Yinghai Lu | e420dfb | 2008-08-19 20:50:21 -0700 | [diff] [blame] | 150 | *sub_handle = irq_iommu->sub_handle; |
| 151 | index = irq_iommu->irte_index; |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 152 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 153 | return index; |
| 154 | } |
| 155 | |
Joerg Roedel | 263b5e8 | 2012-03-30 11:47:06 -0700 | [diff] [blame] | 156 | static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 157 | { |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 158 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
Jiang Liu | 91411da | 2014-10-27 16:12:09 +0800 | [diff] [blame] | 159 | struct irq_cfg *cfg = irq_cfg(irq); |
Suresh Siddha | 4c5502b | 2009-03-16 17:04:53 -0700 | [diff] [blame] | 160 | unsigned long flags; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 161 | |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 162 | if (!irq_iommu) |
Yinghai Lu | 0b8f1ef | 2008-12-05 18:58:31 -0800 | [diff] [blame] | 163 | return -1; |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 164 | |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 165 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
Yinghai Lu | 0b8f1ef | 2008-12-05 18:58:31 -0800 | [diff] [blame] | 166 | |
Joerg Roedel | 9b1b0e4 | 2012-09-26 12:44:45 +0200 | [diff] [blame] | 167 | cfg->remapped = 1; |
Yinghai Lu | e420dfb | 2008-08-19 20:50:21 -0700 | [diff] [blame] | 168 | irq_iommu->iommu = iommu; |
| 169 | irq_iommu->irte_index = index; |
| 170 | irq_iommu->sub_handle = subhandle; |
| 171 | irq_iommu->irte_mask = 0; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 172 | |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 173 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 174 | |
| 175 | return 0; |
| 176 | } |
| 177 | |
Joerg Roedel | 263b5e8 | 2012-03-30 11:47:06 -0700 | [diff] [blame] | 178 | static int modify_irte(int irq, struct irte *irte_modified) |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 179 | { |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 180 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 181 | struct intel_iommu *iommu; |
Suresh Siddha | 4c5502b | 2009-03-16 17:04:53 -0700 | [diff] [blame] | 182 | unsigned long flags; |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 183 | struct irte *irte; |
| 184 | int rc, index; |
| 185 | |
| 186 | if (!irq_iommu) |
| 187 | return -1; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 188 | |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 189 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 190 | |
Yinghai Lu | e420dfb | 2008-08-19 20:50:21 -0700 | [diff] [blame] | 191 | iommu = irq_iommu->iommu; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 192 | |
Yinghai Lu | e420dfb | 2008-08-19 20:50:21 -0700 | [diff] [blame] | 193 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 194 | irte = &iommu->ir_table->base[index]; |
| 195 | |
Linus Torvalds | c513b67 | 2010-08-06 11:02:31 -0700 | [diff] [blame] | 196 | set_64bit(&irte->low, irte_modified->low); |
| 197 | set_64bit(&irte->high, irte_modified->high); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 198 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); |
| 199 | |
Yu Zhao | 704126a | 2009-01-04 16:28:52 +0800 | [diff] [blame] | 200 | rc = qi_flush_iec(iommu, index, 0); |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 201 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
Yu Zhao | 704126a | 2009-01-04 16:28:52 +0800 | [diff] [blame] | 202 | |
| 203 | return rc; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 204 | } |
| 205 | |
Joerg Roedel | 263b5e8 | 2012-03-30 11:47:06 -0700 | [diff] [blame] | 206 | static struct intel_iommu *map_hpet_to_ir(u8 hpet_id) |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 207 | { |
| 208 | int i; |
| 209 | |
| 210 | for (i = 0; i < MAX_HPET_TBS; i++) |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 211 | if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 212 | return ir_hpet[i].iommu; |
| 213 | return NULL; |
| 214 | } |
| 215 | |
Joerg Roedel | 263b5e8 | 2012-03-30 11:47:06 -0700 | [diff] [blame] | 216 | static struct intel_iommu *map_ioapic_to_ir(int apic) |
Suresh Siddha | 89027d3 | 2008-07-10 11:16:56 -0700 | [diff] [blame] | 217 | { |
| 218 | int i; |
| 219 | |
| 220 | for (i = 0; i < MAX_IO_APICS; i++) |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 221 | if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) |
Suresh Siddha | 89027d3 | 2008-07-10 11:16:56 -0700 | [diff] [blame] | 222 | return ir_ioapic[i].iommu; |
| 223 | return NULL; |
| 224 | } |
| 225 | |
Joerg Roedel | 263b5e8 | 2012-03-30 11:47:06 -0700 | [diff] [blame] | 226 | static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) |
Suresh Siddha | 75c46fa | 2008-07-10 11:16:57 -0700 | [diff] [blame] | 227 | { |
| 228 | struct dmar_drhd_unit *drhd; |
| 229 | |
| 230 | drhd = dmar_find_matched_drhd_unit(dev); |
| 231 | if (!drhd) |
| 232 | return NULL; |
| 233 | |
| 234 | return drhd->iommu; |
| 235 | } |
| 236 | |
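| |	/*
| |	 * Zero the block of IRTEs backing this irq and release its bitmap
| |	 * region. Sub-handles (sub_handle != 0) share the master entry's
| |	 * allocation, so only the master clears and flushes the entries.
| |	 */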
Weidong Han | c4658b4 | 2009-05-23 00:41:14 +0800 | [diff] [blame] | 237 | static int clear_entries(struct irq_2_iommu *irq_iommu) |
| 238 | { |
| 239 | struct irte *start, *entry, *end; |
| 240 | struct intel_iommu *iommu; |
| 241 | int index; |
| 242 | |
| 243 | if (irq_iommu->sub_handle) |
| 244 | return 0; |
| 245 | |
| 246 | iommu = irq_iommu->iommu; |
| 247 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
| 248 | |
| 249 | start = iommu->ir_table->base + index; |
| 250 | end = start + (1 << irq_iommu->irte_mask); |
| 251 | |
| 252 | for (entry = start; entry < end; entry++) { |
Linus Torvalds | c513b67 | 2010-08-06 11:02:31 -0700 | [diff] [blame] | 253 | set_64bit(&entry->low, 0); |
| 254 | set_64bit(&entry->high, 0); |
Weidong Han | c4658b4 | 2009-05-23 00:41:14 +0800 | [diff] [blame] | 255 | } |
Jiang Liu | 360eb3c5 | 2014-01-06 14:18:08 +0800 | [diff] [blame] | 256 | bitmap_release_region(iommu->ir_table->bitmap, index, |
| 257 | irq_iommu->irte_mask); |
Weidong Han | c4658b4 | 2009-05-23 00:41:14 +0800 | [diff] [blame] | 258 | |
| 259 | return qi_flush_iec(iommu, index, irq_iommu->irte_mask); |
| 260 | } |
| 261 | |
Joerg Roedel | 9d619f6 | 2012-03-30 11:47:04 -0700 | [diff] [blame] | 262 | static int free_irte(int irq) |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 263 | { |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 264 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
Suresh Siddha | 4c5502b | 2009-03-16 17:04:53 -0700 | [diff] [blame] | 265 | unsigned long flags; |
Thomas Gleixner | d585d06 | 2010-10-10 12:34:27 +0200 | [diff] [blame] | 266 | int rc; |
| 267 | |
| 268 | if (!irq_iommu) |
| 269 | return -1; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 270 | |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 271 | raw_spin_lock_irqsave(&irq_2_ir_lock, flags); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 272 | |
Weidong Han | c4658b4 | 2009-05-23 00:41:14 +0800 | [diff] [blame] | 273 | rc = clear_entries(irq_iommu); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 274 | |
Yinghai Lu | e420dfb | 2008-08-19 20:50:21 -0700 | [diff] [blame] | 275 | irq_iommu->iommu = NULL; |
| 276 | irq_iommu->irte_index = 0; |
| 277 | irq_iommu->sub_handle = 0; |
| 278 | irq_iommu->irte_mask = 0; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 279 | |
Thomas Gleixner | 96f8e98 | 2011-07-19 16:28:19 +0200 | [diff] [blame] | 280 | raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 281 | |
Yu Zhao | 704126a | 2009-01-04 16:28:52 +0800 | [diff] [blame] | 282 | return rc; |
Suresh Siddha | b6fcb33 | 2008-07-10 11:16:44 -0700 | [diff] [blame] | 283 | } |
| 284 | |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 285 | /* |
| 286 | * source validation type |
| 287 | */ |
| 288 | #define SVT_NO_VERIFY 0x0 /* no verification is required */ |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 289 | #define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */ |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 290 | #define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */ |
| 291 | |
| 292 | /* |
| 293 | * source-id qualifier |
| 294 | */ |
| 295 | #define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */ |
| 296 | #define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore |
| 297 | * the third least significant bit |
| 298 | */ |
| 299 | #define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore |
| 300 | * the second and third least significant bits |
| 301 | */ |
| 302 | #define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore |
|  303 | 				 * the three least significant bits
| 304 | */ |
| 305 | |
| 306 | /* |
| 307 | * set SVT, SQ and SID fields of irte to verify |
| 308 | * source ids of interrupt requests |
| 309 | */ |
| 310 | static void set_irte_sid(struct irte *irte, unsigned int svt, |
| 311 | unsigned int sq, unsigned int sid) |
| 312 | { |
Chris Wright | d1423d5 | 2010-07-20 11:06:49 -0700 | [diff] [blame] | 313 | if (disable_sourceid_checking) |
| 314 | svt = SVT_NO_VERIFY; |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 315 | irte->svt = svt; |
| 316 | irte->sq = sq; |
| 317 | irte->sid = sid; |
| 318 | } |
| 319 | |
Joerg Roedel | 263b5e8 | 2012-03-30 11:47:06 -0700 | [diff] [blame] | 320 | static int set_ioapic_sid(struct irte *irte, int apic) |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 321 | { |
| 322 | int i; |
| 323 | u16 sid = 0; |
| 324 | |
| 325 | if (!irte) |
| 326 | return -1; |
| 327 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 328 | down_read(&dmar_global_lock); |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 329 | for (i = 0; i < MAX_IO_APICS; i++) { |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 330 | if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 331 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; |
| 332 | break; |
| 333 | } |
| 334 | } |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 335 | up_read(&dmar_global_lock); |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 336 | |
| 337 | if (sid == 0) { |
Joerg Roedel | 9f10e5b | 2015-06-12 09:57:06 +0200 | [diff] [blame] | 338 | pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic); |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 339 | return -1; |
| 340 | } |
| 341 | |
Jiang Liu | 2fe2c60 | 2014-01-06 14:18:17 +0800 | [diff] [blame] | 342 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid); |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 343 | |
| 344 | return 0; |
| 345 | } |
| 346 | |
Joerg Roedel | 263b5e8 | 2012-03-30 11:47:06 -0700 | [diff] [blame] | 347 | static int set_hpet_sid(struct irte *irte, u8 id) |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 348 | { |
| 349 | int i; |
| 350 | u16 sid = 0; |
| 351 | |
| 352 | if (!irte) |
| 353 | return -1; |
| 354 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 355 | down_read(&dmar_global_lock); |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 356 | for (i = 0; i < MAX_HPET_TBS; i++) { |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 357 | if (ir_hpet[i].iommu && ir_hpet[i].id == id) { |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 358 | sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; |
| 359 | break; |
| 360 | } |
| 361 | } |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 362 | up_read(&dmar_global_lock); |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 363 | |
| 364 | if (sid == 0) { |
Joerg Roedel | 9f10e5b | 2015-06-12 09:57:06 +0200 | [diff] [blame] | 365 | pr_warn("Failed to set source-id of HPET block (%d)\n", id); |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 366 | return -1; |
| 367 | } |
| 368 | |
| 369 | /* |
| 370 | * Should really use SQ_ALL_16. Some platforms are broken. |
| 371 | * While we figure out the right quirks for these broken platforms, use |
| 372 | * SQ_13_IGNORE_3 for now. |
| 373 | */ |
| 374 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid); |
| 375 | |
| 376 | return 0; |
| 377 | } |
| 378 | |
Alex Williamson | 579305f | 2014-07-03 09:51:43 -0600 | [diff] [blame] | 379 | struct set_msi_sid_data { |
| 380 | struct pci_dev *pdev; |
| 381 | u16 alias; |
| 382 | }; |
| 383 | |
| 384 | static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque) |
| 385 | { |
| 386 | struct set_msi_sid_data *data = opaque; |
| 387 | |
| 388 | data->pdev = pdev; |
| 389 | data->alias = alias; |
| 390 | |
| 391 | return 0; |
| 392 | } |
| 393 | |
Joerg Roedel | 263b5e8 | 2012-03-30 11:47:06 -0700 | [diff] [blame] | 394 | static int set_msi_sid(struct irte *irte, struct pci_dev *dev) |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 395 | { |
Alex Williamson | 579305f | 2014-07-03 09:51:43 -0600 | [diff] [blame] | 396 | struct set_msi_sid_data data; |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 397 | |
| 398 | if (!irte || !dev) |
| 399 | return -1; |
| 400 | |
Alex Williamson | 579305f | 2014-07-03 09:51:43 -0600 | [diff] [blame] | 401 | pci_for_each_dma_alias(dev, set_msi_sid_cb, &data); |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 402 | |
Alex Williamson | 579305f | 2014-07-03 09:51:43 -0600 | [diff] [blame] | 403 | /* |
| 404 | * DMA alias provides us with a PCI device and alias. The only case |
|  405 | 	 * where it will return an alias on a different bus than the
| 406 | * device is the case of a PCIe-to-PCI bridge, where the alias is for |
| 407 | * the subordinate bus. In this case we can only verify the bus. |
| 408 | * |
| 409 | * If the alias device is on a different bus than our source device |
| 410 | * then we have a topology based alias, use it. |
| 411 | * |
| 412 | * Otherwise, the alias is for a device DMA quirk and we cannot |
| 413 | * assume that MSI uses the same requester ID. Therefore use the |
| 414 | * original device. |
| 415 | */ |
| 416 | if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number) |
| 417 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, |
| 418 | PCI_DEVID(PCI_BUS_NUM(data.alias), |
| 419 | dev->bus->number)); |
| 420 | else if (data.pdev->bus->number != dev->bus->number) |
| 421 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias); |
| 422 | else |
| 423 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, |
| 424 | PCI_DEVID(dev->bus->number, dev->devfn)); |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 425 | |
| 426 | return 0; |
| 427 | } |
| 428 | |
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 429 | static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 430 | { |
| 431 | u64 addr; |
David Woodhouse | c416daa | 2009-05-10 20:30:58 +0100 | [diff] [blame] | 432 | u32 sts; |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 433 | unsigned long flags; |
| 434 | |
| 435 | addr = virt_to_phys((void *)iommu->ir_table->base); |
| 436 | |
Thomas Gleixner | 1f5b3c3 | 2011-07-19 16:19:51 +0200 | [diff] [blame] | 437 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 438 | |
| 439 | dmar_writeq(iommu->reg + DMAR_IRTA_REG, |
| 440 | (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); |
| 441 | |
| 442 | /* Set interrupt-remapping table pointer */ |
Jan Kiszka | f63ef69 | 2014-08-11 13:13:25 +0200 | [diff] [blame] | 443 | writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG); |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 444 | |
| 445 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
| 446 | readl, (sts & DMA_GSTS_IRTPS), sts); |
Thomas Gleixner | 1f5b3c3 | 2011-07-19 16:19:51 +0200 | [diff] [blame] | 447 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 448 | |
| 449 | /* |
| 450 | * global invalidation of interrupt entry cache before enabling |
| 451 | * interrupt-remapping. |
| 452 | */ |
| 453 | qi_global_iec(iommu); |
| 454 | |
Thomas Gleixner | 1f5b3c3 | 2011-07-19 16:19:51 +0200 | [diff] [blame] | 455 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 456 | |
| 457 | /* Enable interrupt-remapping */ |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 458 | iommu->gcmd |= DMA_GCMD_IRE; |
Andy Lutomirski | af8d102 | 2013-02-01 14:57:43 -0800 | [diff] [blame] | 459 | iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ |
David Woodhouse | c416daa | 2009-05-10 20:30:58 +0100 | [diff] [blame] | 460 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 461 | |
| 462 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
| 463 | readl, (sts & DMA_GSTS_IRES), sts); |
| 464 | |
Andy Lutomirski | af8d102 | 2013-02-01 14:57:43 -0800 | [diff] [blame] | 465 | /* |
| 466 | * With CFI clear in the Global Command register, we should be |
| 467 | * protected from dangerous (i.e. compatibility) interrupts |
| 468 | * regardless of x2apic status. Check just to be sure. |
| 469 | */ |
| 470 | if (sts & DMA_GSTS_CFIS) |
| 471 | WARN(1, KERN_WARNING |
| 472 | "Compatibility-format IRQs enabled despite intr remapping;\n" |
| 473 | "you are vulnerable to IRQ injection.\n"); |
| 474 | |
Thomas Gleixner | 1f5b3c3 | 2011-07-19 16:19:51 +0200 | [diff] [blame] | 475 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 476 | } |
| 477 | |
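| |	/*
| |	 * Allocate this IOMMU's interrupt remapping table: a page block of
| |	 * order INTR_REMAP_PAGE_ORDER holding the IRTEs, plus a bitmap that
| |	 * tracks which of the INTR_REMAP_TABLE_ENTRIES slots are in use.
| |	 */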
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 478 | static int intel_setup_irq_remapping(struct intel_iommu *iommu) |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 479 | { |
| 480 | struct ir_table *ir_table; |
| 481 | struct page *pages; |
Jiang Liu | 360eb3c5 | 2014-01-06 14:18:08 +0800 | [diff] [blame] | 482 | unsigned long *bitmap; |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 483 | |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 484 | if (iommu->ir_table) |
| 485 | return 0; |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 486 | |
Thomas Gleixner | e3a981d | 2015-01-07 15:31:30 +0800 | [diff] [blame] | 487 | ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL); |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 488 | if (!ir_table) |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 489 | return -ENOMEM; |
| 490 | |
Thomas Gleixner | e3a981d | 2015-01-07 15:31:30 +0800 | [diff] [blame] | 491 | pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, |
Suresh Siddha | 824cd75 | 2009-10-02 11:01:23 -0700 | [diff] [blame] | 492 | INTR_REMAP_PAGE_ORDER); |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 493 | |
| 494 | if (!pages) { |
Jiang Liu | 360eb3c5 | 2014-01-06 14:18:08 +0800 | [diff] [blame] | 495 | pr_err("IR%d: failed to allocate pages of order %d\n", |
| 496 | iommu->seq_id, INTR_REMAP_PAGE_ORDER); |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 497 | goto out_free_table; |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 498 | } |
| 499 | |
Jiang Liu | 360eb3c5 | 2014-01-06 14:18:08 +0800 | [diff] [blame] | 500 | bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES), |
| 501 | sizeof(long), GFP_ATOMIC); |
| 502 | if (bitmap == NULL) { |
| 503 | pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 504 | goto out_free_pages; |
Jiang Liu | 360eb3c5 | 2014-01-06 14:18:08 +0800 | [diff] [blame] | 505 | } |
| 506 | |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 507 | ir_table->base = page_address(pages); |
Jiang Liu | 360eb3c5 | 2014-01-06 14:18:08 +0800 | [diff] [blame] | 508 | ir_table->bitmap = bitmap; |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 509 | iommu->ir_table = ir_table; |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 510 | return 0; |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 511 | |
| 512 | out_free_pages: |
| 513 | __free_pages(pages, INTR_REMAP_PAGE_ORDER); |
| 514 | out_free_table: |
| 515 | kfree(ir_table); |
| 516 | return -ENOMEM; |
| 517 | } |
| 518 | |
| 519 | static void intel_teardown_irq_remapping(struct intel_iommu *iommu) |
| 520 | { |
| 521 | if (iommu && iommu->ir_table) { |
| 522 | free_pages((unsigned long)iommu->ir_table->base, |
| 523 | INTR_REMAP_PAGE_ORDER); |
| 524 | kfree(iommu->ir_table->bitmap); |
| 525 | kfree(iommu->ir_table); |
| 526 | iommu->ir_table = NULL; |
| 527 | } |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 528 | } |
| 529 | |
Suresh Siddha | eba67e5 | 2009-03-16 17:04:56 -0700 | [diff] [blame] | 530 | /* |
| 531 | * Disable Interrupt Remapping. |
| 532 | */ |
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 533 | static void iommu_disable_irq_remapping(struct intel_iommu *iommu) |
Suresh Siddha | eba67e5 | 2009-03-16 17:04:56 -0700 | [diff] [blame] | 534 | { |
| 535 | unsigned long flags; |
| 536 | u32 sts; |
| 537 | |
| 538 | if (!ecap_ir_support(iommu->ecap)) |
| 539 | return; |
| 540 | |
Fenghua Yu | b24696b | 2009-03-27 14:22:44 -0700 | [diff] [blame] | 541 | /* |
| 542 | * global invalidation of interrupt entry cache before disabling |
| 543 | * interrupt-remapping. |
| 544 | */ |
| 545 | qi_global_iec(iommu); |
| 546 | |
Thomas Gleixner | 1f5b3c3 | 2011-07-19 16:19:51 +0200 | [diff] [blame] | 547 | raw_spin_lock_irqsave(&iommu->register_lock, flags); |
Suresh Siddha | eba67e5 | 2009-03-16 17:04:56 -0700 | [diff] [blame] | 548 | |
| 549 | sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); |
| 550 | if (!(sts & DMA_GSTS_IRES)) |
| 551 | goto end; |
| 552 | |
| 553 | iommu->gcmd &= ~DMA_GCMD_IRE; |
| 554 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); |
| 555 | |
| 556 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
| 557 | readl, !(sts & DMA_GSTS_IRES), sts); |
| 558 | |
| 559 | end: |
Thomas Gleixner | 1f5b3c3 | 2011-07-19 16:19:51 +0200 | [diff] [blame] | 560 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); |
Suresh Siddha | eba67e5 | 2009-03-16 17:04:56 -0700 | [diff] [blame] | 561 | } |
| 562 | |
Suresh Siddha | 41750d3 | 2011-08-23 17:05:18 -0700 | [diff] [blame] | 563 | static int __init dmar_x2apic_optout(void) |
| 564 | { |
| 565 | struct acpi_table_dmar *dmar; |
| 566 | dmar = (struct acpi_table_dmar *)dmar_tbl; |
| 567 | if (!dmar || no_x2apic_optout) |
| 568 | return 0; |
| 569 | return dmar->flags & DMAR_X2APIC_OPT_OUT; |
| 570 | } |
| 571 | |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 572 | static void __init intel_cleanup_irq_remapping(void) |
| 573 | { |
| 574 | struct dmar_drhd_unit *drhd; |
| 575 | struct intel_iommu *iommu; |
| 576 | |
| 577 | for_each_iommu(iommu, drhd) { |
| 578 | if (ecap_ir_support(iommu->ecap)) { |
| 579 | iommu_disable_irq_remapping(iommu); |
| 580 | intel_teardown_irq_remapping(iommu); |
| 581 | } |
| 582 | } |
| 583 | |
| 584 | if (x2apic_supported()) |
Joerg Roedel | 9f10e5b | 2015-06-12 09:57:06 +0200 | [diff] [blame] | 585 | pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 586 | } |
| 587 | |
| 588 | static int __init intel_prepare_irq_remapping(void) |
| 589 | { |
| 590 | struct dmar_drhd_unit *drhd; |
| 591 | struct intel_iommu *iommu; |
Joerg Roedel | 23256d0 | 2015-06-12 14:15:49 +0200 | [diff] [blame^] | 592 | int eim = 0; |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 593 | |
Jiang Liu | 2966d95 | 2015-01-07 15:31:35 +0800 | [diff] [blame] | 594 | if (irq_remap_broken) { |
Joerg Roedel | 9f10e5b | 2015-06-12 09:57:06 +0200 | [diff] [blame] | 595 | pr_warn("This system BIOS has enabled interrupt remapping\n" |
Jiang Liu | 2966d95 | 2015-01-07 15:31:35 +0800 | [diff] [blame] | 596 | "on a chipset that contains an erratum making that\n" |
| 597 | "feature unstable. To maintain system stability\n" |
| 598 | "interrupt remapping is being disabled. Please\n" |
| 599 | "contact your BIOS vendor for an update\n"); |
| 600 | add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); |
Jiang Liu | 2966d95 | 2015-01-07 15:31:35 +0800 | [diff] [blame] | 601 | return -ENODEV; |
| 602 | } |
| 603 | |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 604 | if (dmar_table_init() < 0) |
Jiang Liu | 2966d95 | 2015-01-07 15:31:35 +0800 | [diff] [blame] | 605 | return -ENODEV; |
| 606 | |
| 607 | if (!dmar_ir_support()) |
| 608 | return -ENODEV; |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 609 | |
| 610 | if (parse_ioapics_under_ir() != 1) { |
Joerg Roedel | 9f10e5b | 2015-06-12 09:57:06 +0200 | [diff] [blame] | 611 | pr_info("Not enabling interrupt remapping\n"); |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 612 | goto error; |
| 613 | } |
| 614 | |
Joerg Roedel | 69cf1d8 | 2015-01-07 15:31:36 +0800 | [diff] [blame] | 615 | /* First make sure all IOMMUs support IRQ remapping */ |
Jiang Liu | 2966d95 | 2015-01-07 15:31:35 +0800 | [diff] [blame] | 616 | for_each_iommu(iommu, drhd) |
Joerg Roedel | 69cf1d8 | 2015-01-07 15:31:36 +0800 | [diff] [blame] | 617 | if (!ecap_ir_support(iommu->ecap)) |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 618 | goto error; |
Joerg Roedel | 69cf1d8 | 2015-01-07 15:31:36 +0800 | [diff] [blame] | 619 | |
Joerg Roedel | 23256d0 | 2015-06-12 14:15:49 +0200 | [diff] [blame^] | 620 | /* Detect remapping mode: lapic or x2apic */ |
| 621 | if (x2apic_supported()) { |
| 622 | eim = !dmar_x2apic_optout(); |
| 623 | if (!eim) { |
|  624 | 			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.\n");
| 625 | pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n"); |
| 626 | } |
| 627 | } |
| 628 | |
| 629 | for_each_iommu(iommu, drhd) { |
| 630 | if (eim && !ecap_eim_support(iommu->ecap)) { |
| 631 | pr_info("%s does not support EIM\n", iommu->name); |
| 632 | eim = 0; |
| 633 | } |
| 634 | } |
| 635 | |
| 636 | eim_mode = eim; |
| 637 | if (eim) |
| 638 | pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n"); |
| 639 | |
Joerg Roedel | 69cf1d8 | 2015-01-07 15:31:36 +0800 | [diff] [blame] | 640 | /* Do the allocations early */ |
| 641 | for_each_iommu(iommu, drhd) |
| 642 | if (intel_setup_irq_remapping(iommu)) |
| 643 | goto error; |
| 644 | |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 645 | return 0; |
Jiang Liu | 2966d95 | 2015-01-07 15:31:35 +0800 | [diff] [blame] | 646 | |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 647 | error: |
| 648 | intel_cleanup_irq_remapping(); |
Jiang Liu | 2966d95 | 2015-01-07 15:31:35 +0800 | [diff] [blame] | 649 | return -ENODEV; |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 650 | } |
| 651 | |
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 652 | static int __init intel_enable_irq_remapping(void) |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 653 | { |
| 654 | struct dmar_drhd_unit *drhd; |
Jiang Liu | 7c91977 | 2014-01-06 14:18:18 +0800 | [diff] [blame] | 655 | struct intel_iommu *iommu; |
Quentin Lambert | 2f119c7 | 2015-02-06 10:59:53 +0100 | [diff] [blame] | 656 | bool setup = false; |
Suresh Siddha | 41750d3 | 2011-08-23 17:05:18 -0700 | [diff] [blame] | 657 | |
Jiang Liu | 7c91977 | 2014-01-06 14:18:18 +0800 | [diff] [blame] | 658 | for_each_iommu(iommu, drhd) { |
Suresh Siddha | 1531a6a | 2009-03-16 17:04:57 -0700 | [diff] [blame] | 659 | /* |
Han, Weidong | 34aaaa9 | 2009-04-04 17:21:26 +0800 | [diff] [blame] | 660 | * If the queued invalidation is already initialized, |
|  661 | 		 * we shouldn't disable it.
| 662 | */ |
| 663 | if (iommu->qi) |
| 664 | continue; |
| 665 | |
| 666 | /* |
Suresh Siddha | 1531a6a | 2009-03-16 17:04:57 -0700 | [diff] [blame] | 667 | * Clear previous faults. |
| 668 | */ |
| 669 | dmar_fault(-1, iommu); |
| 670 | |
| 671 | /* |
| 672 | * Disable intr remapping and queued invalidation, if already |
| 673 | * enabled prior to OS handover. |
| 674 | */ |
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 675 | iommu_disable_irq_remapping(iommu); |
Suresh Siddha | 1531a6a | 2009-03-16 17:04:57 -0700 | [diff] [blame] | 676 | |
| 677 | dmar_disable_qi(iommu); |
| 678 | } |
| 679 | |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 680 | /* |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 681 | * Enable queued invalidation for all the DRHD's. |
| 682 | */ |
Jiang Liu | 7c91977 | 2014-01-06 14:18:18 +0800 | [diff] [blame] | 683 | for_each_iommu(iommu, drhd) { |
| 684 | int ret = dmar_enable_qi(iommu); |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 685 | |
| 686 | if (ret) { |
Joerg Roedel | 9f10e5b | 2015-06-12 09:57:06 +0200 | [diff] [blame] |  687 | 			pr_err("DRHD %Lx: failed to enable queued"
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 688 | " invalidation, ecap %Lx, ret %d\n", |
| 689 | drhd->reg_base_addr, iommu->ecap, ret); |
Andy Lutomirski | af8d102 | 2013-02-01 14:57:43 -0800 | [diff] [blame] | 690 | goto error; |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 691 | } |
| 692 | } |
| 693 | |
| 694 | /* |
| 695 | * Setup Interrupt-remapping for all the DRHD's now. |
| 696 | */ |
Jiang Liu | 7c91977 | 2014-01-06 14:18:18 +0800 | [diff] [blame] | 697 | for_each_iommu(iommu, drhd) { |
Joerg Roedel | 23256d0 | 2015-06-12 14:15:49 +0200 | [diff] [blame^] | 698 | iommu_set_irq_remapping(iommu, eim_mode); |
Quentin Lambert | 2f119c7 | 2015-02-06 10:59:53 +0100 | [diff] [blame] | 699 | setup = true; |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 700 | } |
| 701 | |
| 702 | if (!setup) |
| 703 | goto error; |
| 704 | |
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 705 | irq_remapping_enabled = 1; |
Joerg Roedel | afcc8a4 | 2012-09-26 12:44:36 +0200 | [diff] [blame] | 706 | |
| 707 | /* |
| 708 | * VT-d has a different layout for IO-APIC entries when |
| 709 | * interrupt remapping is enabled. So it needs a special routine |
| 710 | * to print IO-APIC entries for debugging purposes too. |
| 711 | */ |
| 712 | x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries; |
| 713 | |
Joerg Roedel | 23256d0 | 2015-06-12 14:15:49 +0200 | [diff] [blame^] | 714 | pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic"); |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 715 | |
Joerg Roedel | 23256d0 | 2015-06-12 14:15:49 +0200 | [diff] [blame^] | 716 | return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 717 | |
| 718 | error: |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 719 | intel_cleanup_irq_remapping(); |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 720 | return -1; |
| 721 | } |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 722 | |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 723 | static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, |
| 724 | struct intel_iommu *iommu, |
| 725 | struct acpi_dmar_hardware_unit *drhd) |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 726 | { |
| 727 | struct acpi_dmar_pci_path *path; |
| 728 | u8 bus; |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 729 | int count, free = -1; |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 730 | |
| 731 | bus = scope->bus; |
| 732 | path = (struct acpi_dmar_pci_path *)(scope + 1); |
| 733 | count = (scope->length - sizeof(struct acpi_dmar_device_scope)) |
| 734 | / sizeof(struct acpi_dmar_pci_path); |
| 735 | |
| 736 | while (--count > 0) { |
| 737 | /* |
|  738 | 		 * Access PCI directly because the PCI
|  739 | 		 * subsystem isn't initialized yet.
| 740 | */ |
Lv Zheng | fa5f508 | 2013-10-31 09:30:22 +0800 | [diff] [blame] | 741 | bus = read_pci_config_byte(bus, path->device, path->function, |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 742 | PCI_SECONDARY_BUS); |
| 743 | path++; |
| 744 | } |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 745 | |
| 746 | for (count = 0; count < MAX_HPET_TBS; count++) { |
| 747 | if (ir_hpet[count].iommu == iommu && |
| 748 | ir_hpet[count].id == scope->enumeration_id) |
| 749 | return 0; |
| 750 | else if (ir_hpet[count].iommu == NULL && free == -1) |
| 751 | free = count; |
| 752 | } |
| 753 | if (free == -1) { |
| 754 | pr_warn("Exceeded Max HPET blocks\n"); |
| 755 | return -ENOSPC; |
| 756 | } |
| 757 | |
| 758 | ir_hpet[free].iommu = iommu; |
| 759 | ir_hpet[free].id = scope->enumeration_id; |
| 760 | ir_hpet[free].bus = bus; |
| 761 | ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function); |
| 762 | pr_info("HPET id %d under DRHD base 0x%Lx\n", |
| 763 | scope->enumeration_id, drhd->address); |
| 764 | |
| 765 | return 0; |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 766 | } |
| 767 | |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 768 | static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, |
| 769 | struct intel_iommu *iommu, |
| 770 | struct acpi_dmar_hardware_unit *drhd) |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 771 | { |
| 772 | struct acpi_dmar_pci_path *path; |
| 773 | u8 bus; |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 774 | int count, free = -1; |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 775 | |
| 776 | bus = scope->bus; |
| 777 | path = (struct acpi_dmar_pci_path *)(scope + 1); |
| 778 | count = (scope->length - sizeof(struct acpi_dmar_device_scope)) |
| 779 | / sizeof(struct acpi_dmar_pci_path); |
| 780 | |
| 781 | while (--count > 0) { |
| 782 | /* |
|  783 | 		 * Access PCI directly because the PCI
|  784 | 		 * subsystem isn't initialized yet.
| 785 | */ |
Lv Zheng | fa5f508 | 2013-10-31 09:30:22 +0800 | [diff] [blame] | 786 | bus = read_pci_config_byte(bus, path->device, path->function, |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 787 | PCI_SECONDARY_BUS); |
| 788 | path++; |
| 789 | } |
| 790 | |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 791 | for (count = 0; count < MAX_IO_APICS; count++) { |
| 792 | if (ir_ioapic[count].iommu == iommu && |
| 793 | ir_ioapic[count].id == scope->enumeration_id) |
| 794 | return 0; |
| 795 | else if (ir_ioapic[count].iommu == NULL && free == -1) |
| 796 | free = count; |
| 797 | } |
| 798 | if (free == -1) { |
|  799 | 		pr_warn("Exceeded Max IO APICs\n");
| 800 | return -ENOSPC; |
| 801 | } |
| 802 | |
| 803 | ir_ioapic[free].bus = bus; |
| 804 | ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function); |
| 805 | ir_ioapic[free].iommu = iommu; |
| 806 | ir_ioapic[free].id = scope->enumeration_id; |
| 807 | pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n", |
| 808 | scope->enumeration_id, drhd->address, iommu->seq_id); |
| 809 | |
| 810 | return 0; |
Weidong Han | f007e99 | 2009-05-23 00:41:15 +0800 | [diff] [blame] | 811 | } |
| 812 | |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 813 | static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, |
| 814 | struct intel_iommu *iommu) |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 815 | { |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 816 | int ret = 0; |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 817 | struct acpi_dmar_hardware_unit *drhd; |
| 818 | struct acpi_dmar_device_scope *scope; |
| 819 | void *start, *end; |
| 820 | |
| 821 | drhd = (struct acpi_dmar_hardware_unit *)header; |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 822 | start = (void *)(drhd + 1); |
| 823 | end = ((void *)drhd) + header->length; |
| 824 | |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 825 | while (start < end && ret == 0) { |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 826 | scope = start; |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 827 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) |
| 828 | ret = ir_parse_one_ioapic_scope(scope, iommu, drhd); |
| 829 | else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) |
| 830 | ret = ir_parse_one_hpet_scope(scope, iommu, drhd); |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 831 | start += scope->length; |
| 832 | } |
| 833 | |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 834 | return ret; |
| 835 | } |
| 836 | |
| 837 | static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu) |
| 838 | { |
| 839 | int i; |
| 840 | |
| 841 | for (i = 0; i < MAX_HPET_TBS; i++) |
| 842 | if (ir_hpet[i].iommu == iommu) |
| 843 | ir_hpet[i].iommu = NULL; |
| 844 | |
| 845 | for (i = 0; i < MAX_IO_APICS; i++) |
| 846 | if (ir_ioapic[i].iommu == iommu) |
| 847 | ir_ioapic[i].iommu = NULL; |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 848 | } |
| 849 | |
| 850 | /* |
|  851 |  * Finds the association between IOAPICs and their interrupt-remapping
|  852 |  * hardware units.
| 853 | */ |
Jiang Liu | 694835d | 2014-01-06 14:18:16 +0800 | [diff] [blame] | 854 | static int __init parse_ioapics_under_ir(void) |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 855 | { |
| 856 | struct dmar_drhd_unit *drhd; |
Jiang Liu | 7c91977 | 2014-01-06 14:18:18 +0800 | [diff] [blame] | 857 | struct intel_iommu *iommu; |
Quentin Lambert | 2f119c7 | 2015-02-06 10:59:53 +0100 | [diff] [blame] | 858 | bool ir_supported = false; |
Seth Forshee | 32ab31e | 2012-08-08 08:27:03 -0500 | [diff] [blame] | 859 | int ioapic_idx; |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 860 | |
Jiang Liu | 7c91977 | 2014-01-06 14:18:18 +0800 | [diff] [blame] | 861 | for_each_iommu(iommu, drhd) |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 862 | if (ecap_ir_support(iommu->ecap)) { |
Suresh Siddha | 20f3097 | 2009-08-04 12:07:08 -0700 | [diff] [blame] | 863 | if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu)) |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 864 | return -1; |
| 865 | |
Quentin Lambert | 2f119c7 | 2015-02-06 10:59:53 +0100 | [diff] [blame] | 866 | ir_supported = true; |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 867 | } |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 868 | |
Seth Forshee | 32ab31e | 2012-08-08 08:27:03 -0500 | [diff] [blame] | 869 | if (!ir_supported) |
| 870 | return 0; |
| 871 | |
| 872 | for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) { |
| 873 | int ioapic_id = mpc_ioapic_id(ioapic_idx); |
| 874 | if (!map_ioapic_to_ir(ioapic_id)) { |
| 875 | pr_err(FW_BUG "ioapic %d has no mapping iommu, " |
| 876 | "interrupt remapping will be disabled\n", |
| 877 | ioapic_id); |
| 878 | return -1; |
| 879 | } |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 880 | } |
| 881 | |
Seth Forshee | 32ab31e | 2012-08-08 08:27:03 -0500 | [diff] [blame] | 882 | return 1; |
Suresh Siddha | ad3ad3f | 2008-07-10 11:16:40 -0700 | [diff] [blame] | 883 | } |
Fenghua Yu | b24696b | 2009-03-27 14:22:44 -0700 | [diff] [blame] | 884 | |
Rashika Kheria | 6a7885c | 2013-12-18 12:04:27 +0530 | [diff] [blame] | 885 | static int __init ir_dev_scope_init(void) |
Suresh Siddha | c2c7286 | 2011-08-23 17:05:19 -0700 | [diff] [blame] | 886 | { |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 887 | int ret; |
| 888 | |
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 889 | if (!irq_remapping_enabled) |
Suresh Siddha | c2c7286 | 2011-08-23 17:05:19 -0700 | [diff] [blame] | 890 | return 0; |
| 891 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 892 | down_write(&dmar_global_lock); |
| 893 | ret = dmar_dev_scope_init(); |
| 894 | up_write(&dmar_global_lock); |
| 895 | |
| 896 | return ret; |
Suresh Siddha | c2c7286 | 2011-08-23 17:05:19 -0700 | [diff] [blame] | 897 | } |
| 898 | rootfs_initcall(ir_dev_scope_init); |
| 899 | |
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 900 | static void disable_irq_remapping(void) |
Fenghua Yu | b24696b | 2009-03-27 14:22:44 -0700 | [diff] [blame] | 901 | { |
| 902 | struct dmar_drhd_unit *drhd; |
| 903 | struct intel_iommu *iommu = NULL; |
| 904 | |
| 905 | /* |
| 906 | * Disable Interrupt-remapping for all the DRHD's now. |
| 907 | */ |
| 908 | for_each_iommu(iommu, drhd) { |
| 909 | if (!ecap_ir_support(iommu->ecap)) |
| 910 | continue; |
| 911 | |
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 912 | iommu_disable_irq_remapping(iommu); |
Fenghua Yu | b24696b | 2009-03-27 14:22:44 -0700 | [diff] [blame] | 913 | } |
| 914 | } |
| 915 | |
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 916 | static int reenable_irq_remapping(int eim) |
Fenghua Yu | b24696b | 2009-03-27 14:22:44 -0700 | [diff] [blame] | 917 | { |
| 918 | struct dmar_drhd_unit *drhd; |
Quentin Lambert | 2f119c7 | 2015-02-06 10:59:53 +0100 | [diff] [blame] | 919 | bool setup = false; |
Fenghua Yu | b24696b | 2009-03-27 14:22:44 -0700 | [diff] [blame] | 920 | struct intel_iommu *iommu = NULL; |
| 921 | |
| 922 | for_each_iommu(iommu, drhd) |
| 923 | if (iommu->qi) |
| 924 | dmar_reenable_qi(iommu); |
| 925 | |
| 926 | /* |
| 927 | * Setup Interrupt-remapping for all the DRHD's now. |
| 928 | */ |
| 929 | for_each_iommu(iommu, drhd) { |
| 930 | if (!ecap_ir_support(iommu->ecap)) |
| 931 | continue; |
| 932 | |
|  933 | 		/* Set up interrupt remapping for iommu. */
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 934 | iommu_set_irq_remapping(iommu, eim); |
Quentin Lambert | 2f119c7 | 2015-02-06 10:59:53 +0100 | [diff] [blame] | 935 | setup = true; |
Fenghua Yu | b24696b | 2009-03-27 14:22:44 -0700 | [diff] [blame] | 936 | } |
| 937 | |
| 938 | if (!setup) |
| 939 | goto error; |
| 940 | |
| 941 | return 0; |
| 942 | |
| 943 | error: |
| 944 | /* |
| 945 | * handle error condition gracefully here! |
| 946 | */ |
| 947 | return -1; |
| 948 | } |
| 949 | |
Joerg Roedel | 0c3f173 | 2012-03-30 11:47:02 -0700 | [diff] [blame] | 950 | static void prepare_irte(struct irte *irte, int vector, |
| 951 | unsigned int dest) |
| 952 | { |
| 953 | memset(irte, 0, sizeof(*irte)); |
| 954 | |
| 955 | irte->present = 1; |
| 956 | irte->dst_mode = apic->irq_dest_mode; |
| 957 | /* |
| 958 | * Trigger mode in the IRTE will always be edge, and for IO-APIC, the |
|  959 | 	 * actual level or edge trigger will be set up in the IO-APIC
| 960 | * RTE. This will help simplify level triggered irq migration. |
|  961 | 	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
| 962 | * irq migration in the presence of interrupt-remapping. |
| 963 | */ |
| 964 | irte->trigger_mode = 0; |
| 965 | irte->dlvry_mode = apic->irq_delivery_mode; |
| 966 | irte->vector = vector; |
| 967 | irte->dest_id = IRTE_DEST(dest); |
| 968 | irte->redir_hint = 1; |
| 969 | } |
| 970 | |
| 971 | static int intel_setup_ioapic_entry(int irq, |
| 972 | struct IO_APIC_route_entry *route_entry, |
| 973 | unsigned int destination, int vector, |
| 974 | struct io_apic_irq_attr *attr) |
| 975 | { |
| 976 | int ioapic_id = mpc_ioapic_id(attr->ioapic); |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 977 | struct intel_iommu *iommu; |
Joerg Roedel | 0c3f173 | 2012-03-30 11:47:02 -0700 | [diff] [blame] | 978 | struct IR_IO_APIC_route_entry *entry; |
| 979 | struct irte irte; |
| 980 | int index; |
| 981 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 982 | down_read(&dmar_global_lock); |
| 983 | iommu = map_ioapic_to_ir(ioapic_id); |
Joerg Roedel | 0c3f173 | 2012-03-30 11:47:02 -0700 | [diff] [blame] | 984 | if (!iommu) { |
| 985 | pr_warn("No mapping iommu for ioapic %d\n", ioapic_id); |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 986 | index = -ENODEV; |
| 987 | } else { |
| 988 | index = alloc_irte(iommu, irq, 1); |
| 989 | if (index < 0) { |
| 990 | pr_warn("Failed to allocate IRTE for ioapic %d\n", |
| 991 | ioapic_id); |
| 992 | index = -ENOMEM; |
| 993 | } |
Joerg Roedel | 0c3f173 | 2012-03-30 11:47:02 -0700 | [diff] [blame] | 994 | } |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 995 | up_read(&dmar_global_lock); |
| 996 | if (index < 0) |
| 997 | return index; |
Joerg Roedel | 0c3f173 | 2012-03-30 11:47:02 -0700 | [diff] [blame] | 998 | |
| 999 | prepare_irte(&irte, vector, destination); |
| 1000 | |
| 1001 | /* Set source-id of interrupt request */ |
| 1002 | set_ioapic_sid(&irte, ioapic_id); |
| 1003 | |
| 1004 | modify_irte(irq, &irte); |
| 1005 | |
| 1006 | apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: " |
| 1007 | "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d " |
| 1008 | "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X " |
| 1009 | "Avail:%X Vector:%02X Dest:%08X " |
| 1010 | "SID:%04X SQ:%X SVT:%X)\n", |
| 1011 | attr->ioapic, irte.present, irte.fpd, irte.dst_mode, |
| 1012 | irte.redir_hint, irte.trigger_mode, irte.dlvry_mode, |
| 1013 | irte.avail, irte.vector, irte.dest_id, |
| 1014 | irte.sid, irte.sq, irte.svt); |
| 1015 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1016 | entry = (struct IR_IO_APIC_route_entry *)route_entry; |
Joerg Roedel | 0c3f173 | 2012-03-30 11:47:02 -0700 | [diff] [blame] | 1017 | memset(entry, 0, sizeof(*entry)); |
| 1018 | |
| 1019 | entry->index2 = (index >> 15) & 0x1; |
| 1020 | entry->zero = 0; |
| 1021 | entry->format = 1; |
| 1022 | entry->index = (index & 0x7fff); |
| 1023 | /* |
| 1024 | 	 * The IO-APIC RTE will be configured with the virtual vector.
| 1025 | 	 * The irq handler will do the explicit EOI to the io-apic.
| 1026 | */ |
| 1027 | entry->vector = attr->ioapic_pin; |
| 1028 | entry->mask = 0; /* enable IRQ */ |
| 1029 | entry->trigger = attr->trigger; |
| 1030 | entry->polarity = attr->polarity; |
| 1031 | |
| 1032 | /* Mask level triggered irqs. |
| 1033 | * Use IRQ_DELAYED_DISABLE for edge triggered irqs. |
| 1034 | */ |
| 1035 | if (attr->trigger) |
| 1036 | entry->mask = 1; |
| 1037 | |
| 1038 | return 0; |
| 1039 | } |
| 1040 | |
Joerg Roedel | 4c1bad6 | 2012-03-30 11:47:03 -0700 | [diff] [blame] | 1041 | /* |
| 1042 | * Migrate the IO-APIC irq in the presence of intr-remapping. |
| 1043 | * |
| 1044 |  * For both level and edge triggered, irq migration is a simple atomic
| 1045 |  * update (of vector and cpu destination) of the IRTE and a flush of the hardware cache.
| 1046 | * |
| 1047 |  * For level triggered, we eliminate the io-apic RTE modification (with the
| 1048 |  * updated vector information) by using a virtual vector (io-apic pin number).
| 1049 |  * The real vector that is used for interrupting the cpu will come from
| 1050 |  * the interrupt-remapping table entry.
| 1051 | * |
| 1052 | * As the migration is a simple atomic update of IRTE, the same mechanism |
| 1053 | * is used to migrate MSI irq's in the presence of interrupt-remapping. |
| 1054 | */ |
| 1055 | static int |
| 1056 | intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
| 1057 | bool force) |
| 1058 | { |
Jiang Liu | 91411da | 2014-10-27 16:12:09 +0800 | [diff] [blame] | 1059 | struct irq_cfg *cfg = irqd_cfg(data); |
Joerg Roedel | 4c1bad6 | 2012-03-30 11:47:03 -0700 | [diff] [blame] | 1060 | unsigned int dest, irq = data->irq; |
| 1061 | struct irte irte; |
Alexander Gordeev | ff16432 | 2012-06-07 15:15:59 +0200 | [diff] [blame] | 1062 | int err; |
Joerg Roedel | 4c1bad6 | 2012-03-30 11:47:03 -0700 | [diff] [blame] | 1063 | |
Suresh Siddha | 7eb9ae0 | 2012-06-14 18:28:49 -0700 | [diff] [blame] | 1064 | if (!config_enabled(CONFIG_SMP)) |
| 1065 | return -EINVAL; |
| 1066 | |
Joerg Roedel | 4c1bad6 | 2012-03-30 11:47:03 -0700 | [diff] [blame] | 1067 | if (!cpumask_intersects(mask, cpu_online_mask)) |
| 1068 | return -EINVAL; |
| 1069 | |
| 1070 | if (get_irte(irq, &irte)) |
| 1071 | return -EBUSY; |
| 1072 | |
Alexander Gordeev | ff16432 | 2012-06-07 15:15:59 +0200 | [diff] [blame] | 1073 | err = assign_irq_vector(irq, cfg, mask); |
| 1074 | if (err) |
| 1075 | return err; |
Joerg Roedel | 4c1bad6 | 2012-03-30 11:47:03 -0700 | [diff] [blame] | 1076 | |
Alexander Gordeev | ff16432 | 2012-06-07 15:15:59 +0200 | [diff] [blame] | 1077 | err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest); |
| 1078 | if (err) { |
Dan Carpenter | ed88bed | 2012-06-12 19:26:33 +0300 | [diff] [blame] | 1079 | if (assign_irq_vector(irq, cfg, data->affinity)) |
Alexander Gordeev | ff16432 | 2012-06-07 15:15:59 +0200 | [diff] [blame] | 1080 | pr_err("Failed to recover vector for irq %d\n", irq); |
| 1081 | return err; |
| 1082 | } |
Joerg Roedel | 4c1bad6 | 2012-03-30 11:47:03 -0700 | [diff] [blame] | 1083 | |
| 1084 | irte.vector = cfg->vector; |
| 1085 | irte.dest_id = IRTE_DEST(dest); |
| 1086 | |
| 1087 | /* |
| 1088 | 	 * Atomically updates the IRTE with the new destination and vector,
| 1089 | 	 * and flushes the interrupt entry cache.
| 1090 | */ |
| 1091 | modify_irte(irq, &irte); |
| 1092 | |
| 1093 | /* |
| 1094 | * After this point, all the interrupts will start arriving |
| 1095 | 	 * at the new destination. So, time to clean up the previous
| 1096 | * vector allocation. |
| 1097 | */ |
| 1098 | if (cfg->move_in_progress) |
| 1099 | send_cleanup_vector(cfg); |
| 1100 | |
| 1101 | cpumask_copy(data->affinity, mask); |
| 1102 | return 0; |
| 1103 | } |
Joerg Roedel | 0c3f173 | 2012-03-30 11:47:02 -0700 | [diff] [blame] | 1104 | |
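| | /*
| |  * Compose an MSI message in remapped format: the address encodes the IRTE
| |  * index (with SHV set) and the data carries the sub-handle, while the real
| |  * vector and destination are programmed into the IRTE via modify_irte().
| |  */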
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1105 | static void intel_compose_msi_msg(struct pci_dev *pdev, |
| 1106 | unsigned int irq, unsigned int dest, |
| 1107 | struct msi_msg *msg, u8 hpet_id) |
| 1108 | { |
| 1109 | struct irq_cfg *cfg; |
| 1110 | struct irte irte; |
Suresh Siddha | c558df4 | 2012-05-08 00:08:54 -0700 | [diff] [blame] | 1111 | u16 sub_handle = 0; |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1112 | int ir_index; |
| 1113 | |
Jiang Liu | 91411da | 2014-10-27 16:12:09 +0800 | [diff] [blame] | 1114 | cfg = irq_cfg(irq); |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1115 | |
| 1116 | ir_index = map_irq_to_irte_handle(irq, &sub_handle); |
| 1117 | BUG_ON(ir_index == -1); |
| 1118 | |
| 1119 | prepare_irte(&irte, cfg->vector, dest); |
| 1120 | |
| 1121 | /* Set source-id of interrupt request */ |
| 1122 | if (pdev) |
| 1123 | set_msi_sid(&irte, pdev); |
| 1124 | else |
| 1125 | set_hpet_sid(&irte, hpet_id); |
| 1126 | |
| 1127 | modify_irte(irq, &irte); |
| 1128 | |
| 1129 | msg->address_hi = MSI_ADDR_BASE_HI; |
| 1130 | msg->data = sub_handle; |
| 1131 | msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | |
| 1132 | MSI_ADDR_IR_SHV | |
| 1133 | MSI_ADDR_IR_INDEX1(ir_index) | |
| 1134 | MSI_ADDR_IR_INDEX2(ir_index); |
| 1135 | } |
| 1136 | |
| 1137 | /* |
| 1138 | * Map the PCI dev to the corresponding remapping hardware unit |
| 1139 | * and allocate 'nvec' consecutive interrupt-remapping table entries |
| 1140 | * in it. |
| 1141 | */ |
| 1142 | static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec) |
| 1143 | { |
| 1144 | struct intel_iommu *iommu; |
| 1145 | int index; |
| 1146 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1147 | down_read(&dmar_global_lock); |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1148 | iommu = map_dev_to_ir(dev); |
| 1149 | if (!iommu) { |
Joerg Roedel | 9f10e5b | 2015-06-12 09:57:06 +0200 | [diff] [blame] | 1150 | pr_err("Unable to map PCI %s to iommu\n", pci_name(dev)); |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1151 | index = -ENOENT; |
| 1152 | } else { |
| 1153 | index = alloc_irte(iommu, irq, nvec); |
| 1154 | if (index < 0) { |
Joerg Roedel | 9f10e5b | 2015-06-12 09:57:06 +0200 | [diff] [blame] | 1155 | pr_err("Unable to allocate %d IRTE for PCI %s\n", |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1156 | nvec, pci_name(dev)); |
| 1157 | index = -ENOSPC; |
| 1158 | } |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1159 | } |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1160 | up_read(&dmar_global_lock); |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1161 | |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1162 | return index; |
| 1163 | } |
| 1164 | |
| 1165 | static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq, |
| 1166 | int index, int sub_handle) |
| 1167 | { |
| 1168 | struct intel_iommu *iommu; |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1169 | int ret = -ENOENT; |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1170 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1171 | down_read(&dmar_global_lock); |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1172 | iommu = map_dev_to_ir(pdev); |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1173 | if (iommu) { |
| 1174 | /* |
| 1175 | 		 * Set up the mapping from the irq to the IRTE base index
| 1176 | 		 * and sub_handle, which together point to the appropriate
| 1177 | 		 * interrupt remap table entry.
| 1178 | */ |
| 1179 | set_irte_irq(irq, iommu, index, sub_handle); |
| 1180 | ret = 0; |
| 1181 | } |
| 1182 | up_read(&dmar_global_lock); |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1183 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1184 | return ret; |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1185 | } |
| 1186 | |
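| | /*
| |  * Allocate a single IRTE for an HPET MSI on the IOMMU that handles this
| |  * HPET block.  Returns 0 on success, -1 if there is no mapping or no
| |  * free IRTE.
| |  */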
Yijing Wang | 5fc24d8 | 2014-09-17 17:32:19 +0800 | [diff] [blame] | 1187 | static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id) |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1188 | { |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1189 | int ret = -1; |
| 1190 | struct intel_iommu *iommu; |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1191 | int index; |
| 1192 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1193 | down_read(&dmar_global_lock); |
| 1194 | iommu = map_hpet_to_ir(id); |
| 1195 | if (iommu) { |
| 1196 | index = alloc_irte(iommu, irq, 1); |
| 1197 | if (index >= 0) |
| 1198 | ret = 0; |
| 1199 | } |
| 1200 | up_read(&dmar_global_lock); |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1201 | |
Jiang Liu | 3a5670e | 2014-02-19 14:07:33 +0800 | [diff] [blame] | 1202 | return ret; |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1203 | } |
| 1204 | |
Joerg Roedel | 736baef | 2012-03-30 11:47:00 -0700 | [diff] [blame] | 1205 | struct irq_remap_ops intel_irq_remap_ops = { |
Thomas Gleixner | 1119030 | 2015-01-07 15:31:29 +0800 | [diff] [blame] | 1206 | .prepare = intel_prepare_irq_remapping, |
Suresh Siddha | 95a02e9 | 2012-03-30 11:47:07 -0700 | [diff] [blame] | 1207 | .enable = intel_enable_irq_remapping, |
| 1208 | .disable = disable_irq_remapping, |
| 1209 | .reenable = reenable_irq_remapping, |
Joerg Roedel | 4f3d8b6 | 2012-03-30 11:47:01 -0700 | [diff] [blame] | 1210 | .enable_faulting = enable_drhd_fault_handling, |
Joerg Roedel | 0c3f173 | 2012-03-30 11:47:02 -0700 | [diff] [blame] | 1211 | .setup_ioapic_entry = intel_setup_ioapic_entry, |
Joerg Roedel | 4c1bad6 | 2012-03-30 11:47:03 -0700 | [diff] [blame] | 1212 | .set_affinity = intel_ioapic_set_affinity, |
Joerg Roedel | 9d619f6 | 2012-03-30 11:47:04 -0700 | [diff] [blame] | 1213 | .free_irq = free_irte, |
Joerg Roedel | 5e2b930 | 2012-03-30 11:47:05 -0700 | [diff] [blame] | 1214 | .compose_msi_msg = intel_compose_msi_msg, |
| 1215 | .msi_alloc_irq = intel_msi_alloc_irq, |
| 1216 | .msi_setup_irq = intel_msi_setup_irq, |
Yijing Wang | 5fc24d8 | 2014-09-17 17:32:19 +0800 | [diff] [blame] | 1217 | .alloc_hpet_msi = intel_alloc_hpet_msi, |
Joerg Roedel | 736baef | 2012-03-30 11:47:00 -0700 | [diff] [blame] | 1218 | }; |
Jiang Liu | 6b19724 | 2014-11-09 22:47:58 +0800 | [diff] [blame] | 1219 | |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 1220 | /* |
| 1221 |  * Support for Interrupt Remapping Unit hotplug
| 1222 | */ |
| 1223 | static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu) |
| 1224 | { |
| 1225 | int ret; |
| 1226 | int eim = x2apic_enabled(); |
| 1227 | |
| 1228 | if (eim && !ecap_eim_support(iommu->ecap)) { |
| 1229 | pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n", |
| 1230 | iommu->reg_phys, iommu->ecap); |
| 1231 | return -ENODEV; |
| 1232 | } |
| 1233 | |
| 1234 | if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) { |
| 1235 | pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n", |
| 1236 | iommu->reg_phys); |
| 1237 | return -ENODEV; |
| 1238 | } |
| 1239 | |
| 1240 | /* TODO: check all IOAPICs are covered by IOMMU */ |
| 1241 | |
| 1242 | 	/* Set up Interrupt-remapping now. */
| 1243 | ret = intel_setup_irq_remapping(iommu); |
| 1244 | if (ret) { |
| 1245 | pr_err("DRHD %Lx: failed to allocate resource\n", |
| 1246 | iommu->reg_phys); |
| 1247 | ir_remove_ioapic_hpet_scope(iommu); |
| 1248 | return ret; |
| 1249 | } |
| 1250 | |
| 1251 | if (!iommu->qi) { |
| 1252 | /* Clear previous faults. */ |
| 1253 | dmar_fault(-1, iommu); |
| 1254 | iommu_disable_irq_remapping(iommu); |
| 1255 | dmar_disable_qi(iommu); |
| 1256 | } |
| 1257 | |
| 1258 | /* Enable queued invalidation */ |
| 1259 | ret = dmar_enable_qi(iommu); |
| 1260 | if (!ret) { |
| 1261 | iommu_set_irq_remapping(iommu, eim); |
| 1262 | } else { |
| 1263 | pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n", |
| 1264 | iommu->reg_phys, iommu->ecap, ret); |
| 1265 | intel_teardown_irq_remapping(iommu); |
| 1266 | ir_remove_ioapic_hpet_scope(iommu); |
| 1267 | } |
| 1268 | |
| 1269 | return ret; |
| 1270 | } |
| 1271 | |
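| | /*
| |  * Handle hot-add/removal of a DMAR unit: on insertion, set up interrupt
| |  * remapping for the new IOMMU if it has none yet; on removal, tear it
| |  * down only when none of its remapping entries are still in use
| |  * (-EBUSY otherwise).
| |  */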
Jiang Liu | 6b19724 | 2014-11-09 22:47:58 +0800 | [diff] [blame] | 1272 | int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) |
| 1273 | { |
Jiang Liu | a7a3dad | 2014-11-09 22:48:00 +0800 | [diff] [blame] | 1274 | int ret = 0; |
| 1275 | struct intel_iommu *iommu = dmaru->iommu; |
| 1276 | |
| 1277 | if (!irq_remapping_enabled) |
| 1278 | return 0; |
| 1279 | if (iommu == NULL) |
| 1280 | return -EINVAL; |
| 1281 | if (!ecap_ir_support(iommu->ecap)) |
| 1282 | return 0; |
| 1283 | |
| 1284 | if (insert) { |
| 1285 | if (!iommu->ir_table) |
| 1286 | ret = dmar_ir_add(dmaru, iommu); |
| 1287 | } else { |
| 1288 | if (iommu->ir_table) { |
| 1289 | if (!bitmap_empty(iommu->ir_table->bitmap, |
| 1290 | INTR_REMAP_TABLE_ENTRIES)) { |
| 1291 | ret = -EBUSY; |
| 1292 | } else { |
| 1293 | iommu_disable_irq_remapping(iommu); |
| 1294 | intel_teardown_irq_remapping(iommu); |
| 1295 | ir_remove_ioapic_hpet_scope(iommu); |
| 1296 | } |
| 1297 | } |
| 1298 | } |
| 1299 | |
| 1300 | return ret; |
Jiang Liu | 6b19724 | 2014-11-09 22:47:58 +0800 | [diff] [blame] | 1301 | } |