/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	struct list_head list;
	enum xen_irq_type type;	/* type */
	unsigned irq;
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static int *evtchn_to_irq;

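/* Per-cpu bitmap of the event channels currently bound to each cpu. */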
static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
		      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned short evtchn,
				     unsigned short cpu)
{

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	evtchn_to_irq[evtchn] = irq;
}

static void xen_irq_info_evtchn_init(unsigned irq,
				     unsigned short evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
				  unsigned irq,
				  unsigned short evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
				   unsigned irq,
				   unsigned short evtchn,
				   unsigned short virq)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
				   unsigned short evtchn,
				   unsigned short pirq,
				   unsigned short gsi,
				   unsigned short vector,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.vector = vector;
	info->u.pirq.flags = flags;
}

/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

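/*
 * Events in @sh's @idx'th bitmap word that are pending, not globally
 * masked, and routed to @cpu.
 */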
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));

	info_for_irq(irq)->cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_info *info;

	/* By default all event channels notify CPU#0. */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		struct irq_desc *desc = irq_to_desc(info->irq);
		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i),
		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
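
/*
 * Example (illustrative, not part of this file): after queuing requests
 * on a shared ring, a frontend typically decides whether to kick the
 * backend like this:
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 */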

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
	struct irq_desc *desc = irq_to_desc(irq);

#ifdef CONFIG_SMP
	/* By default all event channels notify CPU#0. */
	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

	irq = irq_alloc_desc_from(first, -1);

	xen_irq_init(irq);

	return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

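/*
 * Ask Xen whether this pirq requires an explicit EOI and cache the
 * answer in the irq_info flags.
 */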
static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	struct evtchn_close close;
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

static int find_irq_by_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}

int xen_allocate_pirq_gsi(unsigned gsi)
{
	return gsi;
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector,
			       shareable ? PIRQ_SHAREABLE : 0);

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt doesn't need an eoi
	 * (pirq_needs_eoi returns false), we treat it like an edge
	 * triggered interrupt so we use handle_edge_irq.
	 * As a matter of fact this only happens when the corresponding
	 * physical interrupt is edge triggered or an msi.
	 *
	 * On the other hand if the interrupt needs an eoi (pirq_needs_eoi
	 * returns true) we treat it like a level triggered interrupt so we
	 * use handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (pirq_needs_eoi(irq))
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int vector, const char *name)
{
	int irq, ret;

	spin_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq == -1)
		goto out;

	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
				      name);

	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	spin_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return -1;
}
#endif

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = DOMID_SELF;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	spin_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info == NULL || info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		xen_irq_info_evtchn_init(irq, evtchn);
	}

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}


int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		xen_irq_info_virq_init(cpu, irq, evtchn, virq);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
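
/*
 * Example (illustrative sketch, not part of this file): a frontend
 * driver that has read its backend's event channel from xenstore would
 * typically bind it like this ("blkfront_interrupt" and "info" are
 * assumed names):
 *
 *	err = bind_evtchn_to_irqhandler(info->evtchn, blkfront_interrupt,
 *					0, "blkfront", info);
 *	if (err < 0)
 *		return err;
 *	info->irq = err;
 */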

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
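
/*
 * Example (illustrative): the Xen clock code binds the per-cpu timer
 * virq roughly like this ("xen_timer_interrupt" is an assumed handler
 * name, and the exact flags may differ):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_PERCPU, "timer", NULL);
 */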

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
				       ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
				       ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
				       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
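
/*
 * Position at which the previous upcall scan stopped.  The next scan
 * resumes just past this point, so that low-numbered ports cannot
 * starve high-numbered ones.
 */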
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~0UL) << i))
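/* e.g. MASK_LSBS(0xff, 4) == 0xf0: bits 0-3 are cleared. */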

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
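/*
 * For example, with 64-bit longs, a pending event on port 67 shows up
 * as bit 3 of evtchn_pending[1] and as bit 1 of the per-vcpu selector
 * word, since 67 = 1 * BITS_PER_LONG + 3.
 */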
static void __xen_evtchn_do_upcall(void)
{
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);

		start_word_idx = __this_cpu_read(current_word_idx);
		start_bit_idx = __this_cpu_read(current_bit_idx);

		word_idx = start_word_idx;

		for (i = 0; pending_words != 0; i++) {
			unsigned long pending_bits;
			unsigned long words;

			words = MASK_LSBS(pending_words, word_idx);

			/*
			 * If we masked out all events, wrap to beginning.
			 */
			if (words == 0) {
				word_idx = 0;
				bit_idx = 0;
				continue;
			}
			word_idx = __ffs(words);

			pending_bits = active_evtchns(cpu, s, word_idx);
			bit_idx = 0; /* usually scan entire word from start */
			if (word_idx == start_word_idx) {
				/* We scan the starting word in two parts */
				if (i == 0)
					/* 1st time: start in the middle */
					bit_idx = start_bit_idx;
				else
					/* 2nd time: mask bits done already */
					bit_idx &= (1UL << start_bit_idx) - 1;
			}

			do {
				unsigned long bits;
				int port, irq;
				struct irq_desc *desc;

				bits = MASK_LSBS(pending_bits, bit_idx);

				/* If we masked out all events, move on. */
				if (bits == 0)
					break;

				bit_idx = __ffs(bits);

				/* Process port. */
				port = (word_idx * BITS_PER_LONG) + bit_idx;
				irq = evtchn_to_irq[port];

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}

				bit_idx = (bit_idx + 1) % BITS_PER_LONG;

				/* Next caller starts at last processed + 1 */
				__this_cpu_write(current_word_idx,
						 bit_idx ? word_idx :
						 (word_idx+1) % BITS_PER_LONG);
				__this_cpu_write(current_bit_idx, bit_idx);
			} while (bit_idx != 0);

			/* Scan start_word_idx twice; all others once. */
			if ((word_idx != start_word_idx) || (i != 0))
				pending_words &= ~(1UL << word_idx);

			word_idx = (word_idx + 1) % BITS_PER_LONG;
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	xen_irq_info_evtchn_init(irq, evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

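/*
 * Re-trigger an event channel: set its pending bit and, unless the
 * channel was already masked, unmask it so the pending event is seen
 * and delivered through the normal path again.
 */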
Isaku Yamahata642e0c82008-04-02 10:53:57 -07001328int resend_irq_on_evtchn(unsigned int irq)
1329{
1330 int masked, evtchn = evtchn_from_irq(irq);
1331 struct shared_info *s = HYPERVISOR_shared_info;
1332
1333 if (!VALID_EVTCHN(evtchn))
1334 return 1;
1335
1336 masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
1337 sync_set_bit(evtchn, s->evtchn_pending);
1338 if (!masked)
1339 unmask_evtchn(evtchn);
1340
1341 return 1;
1342}
1343
static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

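/*
 * Re-establish the GSI<->pirq mappings with the hypervisor after a
 * save/restore cycle; called from xen_irq_resume() below.
 */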
static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI-based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
					gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		xen_irq_info_virq_init(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
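
/* Counterparts to xen_clear_irq_pending(): set or query the pending bit. */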
void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending, with a timeout. In the usual
 * case, the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending. In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

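/*
 * Usage sketch (illustrative only): a busy-wait path, e.g. a paravirtual
 * spinlock slow path, can block on an event channel instead of spinning.
 * The SCHEDOP_poll hypercall returns once the channel pends:
 *
 *	xen_clear_irq_pending(irq);
 *	do {
 *		xen_poll_irq(irq);
 *	} while (!xen_test_irq_pending(irq));
 */
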
void xen_irq_resume(void)
{
	unsigned int cpu, evtchn;
	struct irq_info *info;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

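/*
 * Three irq_chip flavours: "xen-dyn" for ordinary dynamically-bound
 * event channels, "xen-pirq" for channels backed by hardware interrupts,
 * and "xen-percpu" for per-cpu bindings, which need no affinity or
 * retrigger hooks.
 */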
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

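/*
 * Tell the hypervisor how to notify this guest of pending events: the
 * 'via' value written to HVM_PARAM_CALLBACK_IRQ encodes the delivery
 * method (a vector in the PVHVM case below, otherwise e.g. an emulated
 * PCI interrupt).
 */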
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts for receiving event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			xen_setup_pirqs();
	}
}
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001669}