locking, x86, iommu: Annotate irq_2_ir_lock as raw

The irq_2_ir_lock can be taken in atomic context and therefore
must not become a sleeping lock on -rt - annotate it as raw.

In mainline this change documents the low-level nature of
the lock - otherwise there's no functional difference. Lockdep
and sparse checking work as usual.
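
For reference, the pattern this annotation enforces is sketched
below. This is a minimal illustration - the lock and accessor
names are made up, not taken from intr_remapping.c. On -rt a
plain spinlock_t is substituted by a sleeping rt_mutex based
lock, while a raw_spinlock_t keeps true spinning semantics and
remains safe to take from hardirq/atomic context:

	#include <linux/spinlock.h>

	/* Illustrative only: stays a spinning lock on -rt */
	static DEFINE_RAW_SPINLOCK(example_lock);
	static unsigned int example_state;

	static unsigned int example_read_state(void)
	{
		unsigned long flags;
		unsigned int val;

		/* The raw variant never sleeps, so atomic context is fine */
		raw_spin_lock_irqsave(&example_lock, flags);
		val = example_state;
		raw_spin_unlock_irqrestore(&example_lock, flags);

		return val;
	}

The hunks below apply exactly this mechanical switch: the lock
definition and every lock/unlock site move to the raw_ variants,
with the critical sections themselves left unchanged.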

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index b2443f1..73a9034 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -45,7 +45,7 @@
 }
 early_param("intremap", setup_intremap);
 
-static DEFINE_SPINLOCK(irq_2_ir_lock);
+static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
@@ -62,12 +62,12 @@
 	if (!entry || !irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return 0;
 }
 
@@ -101,7 +101,7 @@
 		return -1;
 	}
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 	do {
 		for (i = index; i < index + count; i++)
 	if (table->base[i].present)
@@ -113,7 +113,7 @@
 		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
 
 		if (index == start_index) {
-			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 			printk(KERN_ERR "can't allocate an IRTE\n");
 			return -1;
 		}
@@ -127,7 +127,7 @@
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = mask;
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
 }
@@ -152,10 +152,10 @@
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return index;
 }
 
@@ -167,14 +167,14 @@
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = subhandle;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return 0;
 }
@@ -190,7 +190,7 @@
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	iommu = irq_iommu->iommu;
 
@@ -202,7 +202,7 @@
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
 	rc = qi_flush_iec(iommu, index, 0);
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -270,7 +270,7 @@
 	if (!irq_iommu)
 		return -1;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	rc = clear_entries(irq_iommu);
 
@@ -279,7 +279,7 @@
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }