/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/irq_poll.h>
#include <linux/delay.h>

/* Maximum number of completions handled per softirq run before punting */
static unsigned int irq_poll_budget __read_mostly = 256;

/* Per-CPU list of irq_poll instances scheduled for polling */
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

/**
 * irq_poll_sched - Schedule a run of the iopoll handler
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Add this irq_poll structure to the pending poll list and raise the
 *     irq_poll softirq, unless the instance is disabled or already scheduled.
 **/
void irq_poll_sched(struct irq_poll *iop)
{
	unsigned long flags;

	if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
		return;
	if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
		return;

	local_irq_save(flags);
	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_sched);
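
/*
 * Example (illustrative sketch, not part of this file): a typical driver
 * hands completion processing off from its hard interrupt handler by
 * masking the device interrupt and scheduling its poll instance.
 * struct my_dev, my_dev_mask_irq() and the ->iop member are hypothetical.
 *
 *	static irqreturn_t my_dev_isr(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_dev_mask_irq(dev);
 *		irq_poll_sched(&dev->iop);
 *		return IRQ_HANDLED;
 *	}
 */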

/**
 * __irq_poll_complete - Mark this @iop as un-polled again
 * @iop: The parent iopoll structure
 *
 * Description:
 *     See irq_poll_complete(). This function must be called with interrupts
 *     disabled.
 **/
static void __irq_poll_complete(struct irq_poll *iop)
{
	list_del(&iop->list);
	smp_mb__before_atomic();
	clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}

/**
 * irq_poll_complete - Mark this @iop as un-polled again
 * @iop: The parent iopoll structure
 *
 * Description:
 *     If a driver consumes less than the assigned budget in its run of the
 *     iopoll handler, it'll end the polled mode by calling this function. The
 *     iopoll handler will not be invoked again before irq_poll_sched()
 *     is called.
 **/
void irq_poll_complete(struct irq_poll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	__irq_poll_complete(iop);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_complete);
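
/*
 * Example (illustrative sketch, not part of this file): a poll handler
 * reaps up to @budget completions and ends polled mode once the queue is
 * drained, re-enabling the device interrupt. my_dev_reap_completions()
 * and my_dev_unmask_irq() are hypothetical driver helpers.
 *
 *	static int my_dev_poll(struct irq_poll *iop, int budget)
 *	{
 *		struct my_dev *dev = container_of(iop, struct my_dev, iop);
 *		int done;
 *
 *		done = my_dev_reap_completions(dev, budget);
 *		if (done < budget) {
 *			irq_poll_complete(iop);
 *			my_dev_unmask_irq(dev);
 *		}
 *		return done;
 *	}
 */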

static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
{
	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
	int rearm = 0, budget = irq_poll_budget;
	unsigned long start_time = jiffies;

	local_irq_disable();

	while (!list_empty(list)) {
		struct irq_poll *iop;
		int work, weight;

		/*
		 * If the softirq window is exhausted then punt.
		 */
		if (budget <= 0 || time_after(jiffies, start_time)) {
			rearm = 1;
			break;
		}

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		iop = list_entry(list->next, struct irq_poll, list);

		weight = iop->weight;
		work = 0;
		if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
			work = iop->poll(iop, weight);

		budget -= work;

		local_irq_disable();

		/*
		 * Drivers must not modify the iopoll state if they
		 * consume their assigned weight (or more, some drivers can't
		 * easily just stop processing, they have to complete an
		 * entire mask of commands). In such cases this code
		 * still "owns" the iopoll instance and therefore can
		 * move the instance around on the list at will.
		 */
		if (work >= weight) {
			if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
				__irq_poll_complete(iop);
			else
				list_move_tail(&iop->list, list);
		}
	}

	if (rearm)
		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);

	local_irq_enable();
}

/**
 * irq_poll_disable - Disable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Disable io polling and wait for any pending callbacks to have completed.
 **/
void irq_poll_disable(struct irq_poll *iop)
{
	set_bit(IRQ_POLL_F_DISABLE, &iop->state);
	while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
		msleep(1);
	clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(irq_poll_disable);

/**
 * irq_poll_enable - Enable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that this function does not schedule
 *     a handler run; it only marks the instance as enabled again.
 **/
void irq_poll_enable(struct irq_poll *iop)
{
	BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
	smp_mb__before_atomic();
	clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(irq_poll_enable);
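
/*
 * Example (illustrative sketch, not part of this file): pairing the two
 * calls to quiesce polling around a device reset. irq_poll_disable()
 * returns with IRQ_POLL_F_SCHED held, so irq_poll_enable() is the
 * matching re-arm. my_dev_reset() and the ->iop member are hypothetical.
 *
 *	static void my_dev_quiesce_and_reset(struct my_dev *dev)
 *	{
 *		irq_poll_disable(&dev->iop);
 *		my_dev_reset(dev);
 *		irq_poll_enable(&dev->iop);
 *	}
 */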

/**
 * irq_poll_init - Initialize this @iop
 * @iop: The parent iopoll structure
 * @weight: The default weight (or command completion budget)
 * @poll_fn: The handler to invoke
 *
 * Description:
 *     Initialize and enable this irq_poll structure.
 **/
void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
{
	memset(iop, 0, sizeof(*iop));
	INIT_LIST_HEAD(&iop->list);
	iop->weight = weight;
	iop->poll = poll_fn;
}
EXPORT_SYMBOL(irq_poll_init);
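
/*
 * Example (illustrative sketch, not part of this file): drivers typically
 * initialize the instance once at probe time, before any interrupt can
 * fire. MY_DEV_POLL_WEIGHT, struct my_dev and my_dev_poll() are
 * hypothetical.
 *
 *	static int my_dev_probe(struct my_dev *dev)
 *	{
 *		irq_poll_init(&dev->iop, MY_DEV_POLL_WEIGHT, my_dev_poll);
 *		return 0;
 *	}
 */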

static int irq_poll_cpu_dead(unsigned int cpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	local_irq_disable();
	list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
			 this_cpu_ptr(&blk_cpu_iopoll));
	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
	local_irq_enable();

	return 0;
}

static __init int irq_poll_setup(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

	open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
	cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL,
				  irq_poll_cpu_dead);
	return 0;
}
subsys_initcall(irq_poll_setup);