/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	RCU_TRACE(long qlen);		/* Number of pending CBs. */
	RCU_TRACE(char *name);		/* Name of RCU type. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_sched")
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_bh")
};
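
/*
 * Note on the list representation (a reading aid, not original to this
 * file): ->rcucblist is a singly linked list of callbacks, and the two
 * tail fields point at ->next pointers within that list.  When the list
 * is empty, both tail pointers refer back to &->rcucblist itself, as in
 * the initializers above, so enqueuing is always the uniform
 * "*tail = new; tail = &new->next" with no empty-list special case.
 */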

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/* read-side critical section while a */
				/* preemptible-RCU grace period is in */
				/* progress must wait for a later grace */
				/* period.  This pointer points to the */
				/* ->next pointer of the last task that */
				/* must wait for a later grace period, or */
				/* to &->rcb.rcucblist if there is no */
				/* such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/* section.  Tasks are placed at the head */
				/* of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/* current grace period, or NULL if there */
				/* is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to first task blocking the */
				/* current expedited grace period, or NULL */
				/* if there is no such task.  If there */
				/* is no current expedited grace period, */
				/* then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/* priority-boosted, or NULL if no priority */
				/* boosting is needed.  If there is no */
				/* current or expedited grace period, there */
				/* can be no such task. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/* If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time; /* When to start boosting (jiffies). */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/* This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
};

static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
	RCU_TRACE(.rcb.name = "rcu_preempt")
};

static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_blocking_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.  ;-)
 *
 * Returns zero if there are no running readers.  Returns a positive
 * number if there is at least one reader within its RCU read-side
 * critical section.  Returns a negative number if an outermost reader
 * is in the midst of exiting from its RCU read-side critical section.
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}
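
/*
 * (Reading aid, not original to this file.)  The nesting count is
 * incremented by rcu_read_lock() and decremented by rcu_read_unlock(),
 * so a value of 2, say, means two nested read-side critical sections.
 * The negative case mentioned above arises because the outermost
 * rcu_read_unlock() temporarily drops the count to a negative value
 * while it performs its end-of-critical-section processing.
 */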

/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}

/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}

/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}

/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rcu_preempt_ctrlblk.blkd_tasks)
		np = NULL;
	return np;
}

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST
static void rcu_initiate_boost_trace(void);
#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Dump additional statistics for TINY_PREEMPT_RCU.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
		   "             ",
		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",
		   "             balk",
		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);
#endif /* #ifdef CONFIG_RCU_BOOST */
}
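
/*
 * For illustration only (hypothetical counter values), the first line
 * above renders as:
 *
 *	rcu_preempt: qlen=0 gp=17 g18/p18/c17 tasks=TN.
 *
 * meaning a grace period is in progress (g18 begun, c17 completed)
 * with blocked tasks present, at least one of which blocks that
 * grace period, and none blocking an expedited grace period.
 */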

#endif /* #ifdef CONFIG_RCU_TRACE */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO

/* Controls for rcu_kthread() kthread. */
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;

/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
 * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)
		return 0;  /* Nothing to boost. */

	local_irq_save(flags);

	/*
	 * Recheck with irqs disabled: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own
	 * if we are preempted just before disabling irqs.
	 */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		local_irq_restore(flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	local_irq_restore(flags);
	rt_mutex_lock(&mtx);
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
	       ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
}

/*
 * Check to see if it is now time to start boosting RCU readers blocking
 * the current grace period, and, if so, tell the rcu_kthread_task to
 * start boosting them.  If there is an expedited boost in progress,
 * we wait for it to complete.
 *
 * If there are no blocked readers blocking the current grace period,
 * return 0 to let the caller know, otherwise return 1.  Note that this
 * return value is independent of whether or not boosting was done.
 */
static int rcu_initiate_boost(void)
{
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);
		return 0;
	}
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_callbacks();
	} else {
		RCU_TRACE(rcu_initiate_boost_trace());
	}
	return 1;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
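
/*
 * Worked example (hypothetical configuration, not original to this
 * file): CONFIG_RCU_BOOST_DELAY is in milliseconds, so with
 * CONFIG_RCU_BOOST_DELAY=500 and HZ=250 the expression above is
 * DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, i.e. boosting begins
 * half a second after grace-period start.
 */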

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * If there is no RCU priority boosting, we don't initiate boosting,
 * but we do indicate whether there are blocked readers blocking the
 * current grace period.
 */
static int rcu_initiate_boost(void)
{
	return rcu_preempt_blocked_readers_cgp();
}

/*
 * If there is no RCU priority boosting, nothing to do at grace-period start.
 */
static void rcu_preempt_boost_start_gp(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no GP then there is nothing more to do.  */
	if (!rcu_preempt_gp_in_progress())
		return;
	/*
	 * Check up on boosting.  If there are readers blocking the
	 * current grace period, leave.
	 */
	if (rcu_initiate_boost())
		return;

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_callbacks();
}

/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise, we
 * enqueue it before the element referenced by ->gp_tasks (or at the tail
 * if ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->gp_tasks pointer becomes
 * NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	} else if (rcu_preempt_running_reader() < 0 &&
		   t->rcu_read_unlock_special) {
		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if was boosted. */
	if (t->rcu_boost_mutex != NULL) {
		rbmp = t->rcu_boost_mutex;
		t->rcu_boost_mutex = NULL;
		rt_mutex_unlock(rbmp);
	}
#endif /* #ifdef CONFIG_RCU_BOOST */
	local_irq_restore(flags);
}

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_callbacks();
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader() > 0)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp(); /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
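
/*
 * Usage sketch (hypothetical caller, not part of this file): a typical
 * user embeds an rcu_head in the protected structure and passes a
 * callback that frees it once the grace period has elapsed.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	...
 *	call_rcu(&old_foo->rcu, foo_reclaim);
 */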

/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
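
/*
 * Updater sketch (hypothetical names, not part of this file; assumes
 * the updater already excludes other updaters): publish a new version
 * of a structure, wait for pre-existing readers, then free the old one.
 *
 *	struct foo *new_fp;
 *	struct foo *old_fp = gbl_foo;
 *
 *	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *	*new_fp = *old_fp;
 *	new_fp->data = 42;
 *	rcu_assign_pointer(gbl_foo, new_fp);
 *	synchronize_rcu();	// Wait for readers of old_fp to finish.
 *	kfree(old_fp);
 */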

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(void)
{
	return rcu_preempt_ctrlblk.exp_tasks != NULL;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (!rcu_preempted_readers_exp()) {
		local_irq_restore(flags);
	} else {
		rcu_initiate_boost();
		local_irq_restore(flags);
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());
	}

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_RCU_TRACE

/*
 * Because preemptible RCU does not exist, it is not necessary to
 * dump out its statistics.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
}

#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

/*
 * Wake up rcu_kthread() to process callbacks now eligible for invocation
 * or to boost readers.
 */
static void invoke_rcu_callbacks(void)
{
	have_rcu_kthread_work = 1;
	if (rcu_kthread_task != NULL)
		wake_up(&rcu_kthread_wq);
}

#ifdef CONFIG_RCU_TRACE

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return rcu_kthread_task == current;
}

#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * This kthread invokes RCU callbacks whose grace periods have
 * elapsed.  It is awakened as needed, and takes the place of the
 * RCU_SOFTIRQ that is used for this purpose when boosting is disabled.
 * This is a kthread, but it is never stopped, at least not until
 * the system goes down.
 */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		morework = rcu_boost();
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work)
			rcu_process_callbacks(NULL);
		schedule_timeout_interruptible(1); /* Leave CPU for others. */
	}

	return 0; /* Not reached, but needed to shut gcc up. */
}

/*
 * Spawn the kthread that invokes RCU callbacks.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct sched_param sp;

	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
	return 0;
}
early_initcall(rcu_spawn_kthreads);

#else /* #ifdef CONFIG_RCU_BOOST */

/* Hold off callback invocation until early_initcall() time. */
static int rcu_scheduler_fully_active __read_mostly;

/*
 * Start up softirq processing of callbacks.
 */
void invoke_rcu_callbacks(void)
{
	if (rcu_scheduler_fully_active)
		raise_softirq(RCU_SOFTIRQ);
}

#ifdef CONFIG_RCU_TRACE

/*
 * There is no callback kthread, so this thread is never it.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

#endif /* #ifdef CONFIG_RCU_TRACE */

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	raise_softirq(RCU_SOFTIRQ); /* Invoke any callbacks from early boot. */
	return 0;
}
early_initcall(rcu_scheduler_really_started);

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST

static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++;
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++;
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++;
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++;
	else
		rcu_preempt_ctrlblk.n_balk_nos++;
}

#endif /* #ifdef CONFIG_RCU_BOOST */

static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
	unsigned long flags;

	local_irq_save(flags);
	rcp->qlen -= n;
	local_irq_restore(flags);
}

/*
 * Dump statistics for TINY_RCU, such as they are.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	show_tiny_preempt_stats(m);
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}

static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}

static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct dentry *rcudir;

static int __init rcutiny_trace_init(void)
{
	struct dentry *retval;

	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto free_out;
	retval = debugfs_create_file("rcudata", 0444, rcudir,
				     NULL, &show_tiny_stats_fops);
	if (!retval)
		goto free_out;
	return 0;
free_out:
	debugfs_remove_recursive(rcudir);
	return 1;
}
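
/*
 * With debugfs mounted in its usual place (an assumption about the
 * running system, not something this file controls), the statistics
 * produced by show_tiny_stats() above appear as the file
 * /sys/kernel/debug/rcu/rcudata.
 */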

static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}

module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");

#endif /* #ifdef CONFIG_RCU_TRACE */