#include <linux/irq.h>

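/*
 * set_pending_irq - queue an affinity change for @irq
 *
 * Record @mask in the descriptor's pending_mask and mark the irq as
 * IRQ_MOVE_PENDING; the actual migration is performed later, from a
 * safe (masked) context, by move_masked_irq()/move_native_irq().
 */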
void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	desc->status |= IRQ_MOVE_PENDING;
	irq_desc[irq].pending_mask = mask;
	spin_unlock_irqrestore(&desc->lock, flags);
}

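/*
 * move_masked_irq - perform a pending irq migration
 *
 * Reprogram the irq's affinity to the queued pending_mask.  The caller
 * must hold desc->lock and must have masked the irq; see the comment
 * below on why reprogramming an unmasked edge-triggered irq is unsafe.
 */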
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	cpumask_t tmp;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

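	/*
	 * Restrict the pending mask to CPUs that are actually online.
	 */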
	cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * interrupts, but in an edge-triggered case we might be setting
	 * the RTE while an active trigger is coming in.  This could
	 * cause some ioapics to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(!cpus_empty(tmp))) {
		desc->chip->set_affinity(irq, tmp);
	}
	cpus_clear(irq_desc[irq].pending_mask);
}

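/*
 * move_native_irq - mask an irq and perform its pending migration
 *
 * Unlike move_masked_irq(), this does the masking itself: mask the
 * irq, reprogram its affinity, then unmask it again.
 */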
void move_native_irq(int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

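	/*
	 * Don't migrate a disabled irq: the unmask below would re-enable
	 * it prematurely.  The move stays pending and is retried on the
	 * next interrupt.
	 */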
	if (unlikely(desc->status & IRQ_DISABLED))
		return;

	desc->chip->mask(irq);
	move_masked_irq(irq);
	desc->chip->unmask(irq);
}
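
/*
 * Illustrative only (not part of this file): a minimal sketch of how an
 * architecture's interrupt-ack path might apply a queued affinity
 * change.  The function name example_ack_irq and the use of the chip's
 * ->ack callback are assumptions for illustration, not kernel API
 * guarantees.
 *
 *	static void example_ack_irq(unsigned int irq)
 *	{
 *		struct irq_desc *desc = irq_desc + irq;
 *
 *		desc->chip->ack(irq);	(acknowledge at the chip level)
 *		move_native_irq(irq);	(apply any move queued earlier
 *					 by set_pending_irq())
 *	}
 */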