/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

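/*
 * Example usage (illustrative sketch, not part of this file): a caller
 * that must defer work out of NMI context can embed a struct irq_work
 * in its own data and queue it; the callback then runs later in hardirq
 * context. The names my_func/my_work are made up for illustration.
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		pr_info("deferred out of NMI context\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_func);
 *	...
 *	irq_work_queue(&my_work);	(safe even from NMI)
 */
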
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>


/*
 * Per-CPU queues: raised_list is processed from the dedicated irq_work
 * interrupt, lazy_list from the next timer tick.
 */
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start from the best-case premise that PENDING is clear, but
	 * only trust a flags value once the cmpxchg() result confirms
	 * it; another CPU may be updating work->flags concurrently.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

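/*
 * Lifecycle sketch of work->flags (derived from the logic above; the
 * exact bit values live in <linux/irq_work.h>):
 *
 *	0			free, claimable
 *	PENDING | BUSY		claimed and queued (IRQ_WORK_FLAGS)
 *	BUSY			callback running, may be claimed again
 *	0			callback done, nobody re-claimed it
 *
 * irq_work_claim() fails only while PENDING is set; a claim made during
 * the BUSY-only window re-queues the work after the running callback.
 */
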
void __weak arch_irq_work_raise(void)
{
	/*
	 * Architectures that don't override this don't raise a dedicated
	 * interrupt; pending work then runs from the next timer tick.
	 */
}

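/*
 * Sketch of a typical override (modeled on what IPI-capable
 * architectures do; arch_has_self_ipi()/arch_send_self_ipi() are
 * hypothetical helpers, not any real architecture's code): send a
 * self-IPI whose handler ends up calling irq_work_run().
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		if (arch_has_self_ipi())
 *			arch_send_self_ipi(IRQ_WORK_VECTOR);
 *	}
 */
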
/*
 * Enqueue the irq_work @work unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	/* If the work is "lazy", handle it from the next tick, if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

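/*
 * Illustrative sketch (my_lazy_work/my_lazy_func are made up): marking
 * a work item IRQ_WORK_LAZY defers it to the next timer tick instead of
 * raising a dedicated interrupt, which is cheaper for non-urgent work.
 *
 *	static struct irq_work my_lazy_work = {
 *		.flags = IRQ_WORK_LAZY,
 *		.func  = my_lazy_func,
 *	};
 *
 *	irq_work_queue(&my_lazy_work);
 *
 * As the code above shows, the interrupt is still raised when the tick
 * is stopped (nohz), so lazy work is not delayed indefinitely.
 */
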
/*
 * Used by the nohz code: returns true if this CPU still has pending
 * irq_work and therefore must keep its tick running.
 */
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = &__get_cpu_var(raised_list);
	lazy = &__get_cpu_var(lazy_list);
	if (llist_empty(raised) && llist_empty(lazy))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

static void __irq_work_run(void)
{
	irq_work_run_list(&__get_cpu_var(raised_list));
	irq_work_run_list(&__get_cpu_var(lazy_list));
}

/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	BUG_ON(!in_irq());
	__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);

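/*
 * Illustrative sketch (hypothetical handler name, not any particular
 * architecture's code): architectures that implement
 * arch_irq_work_raise() typically call irq_work_run() from the handler
 * of the interrupt they raised:
 *
 *	void handle_irq_work_ipi(void)
 *	{
 *		irq_work_run();
 *	}
 *
 * Architectures using the weak arch_irq_work_raise() stub instead rely
 * on the timer tick to process the lists.
 */
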
/*
 * Synchronize against the irq_work @work: ensure the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

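/*
 * Illustrative sketch (obj is a made-up object embedding an irq_work):
 * call irq_work_sync() before freeing such an object, so the callback
 * cannot still be running on another CPU:
 *
 *	irq_work_sync(&obj->work);
 *	kfree(obj);
 *
 * It busy-waits, so it must be called from a context that may spin with
 * IRQs enabled (hence the WARN_ON_ONCE(irqs_disabled()) above).
 */
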
#ifdef CONFIG_HOTPLUG_CPU
static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
		/* Called from stop_machine */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			break;
		__irq_work_run();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */