/*
 * Read-Copy Update module-based performance-test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG s "\n", perf_type)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

torture_param(bool, gp_exp, true, "Use expedited GP wait primitives");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, false, "Shutdown at end of performance tests.");
torture_param(bool, verbose, true, "Enable verbose debugging printk()s");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, rcu_bh, ...)");
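
/*
 * Example invocations (illustrative only, assuming the test is built as
 * an "rcuperf" module or as the corresponding built-in object):
 *
 *	modprobe rcuperf perf_type=srcu nwriters=4 nreaders=4 shutdown=1
 *
 * or, for a built-in test, on the kernel boot command line:
 *
 *	rcuperf.perf_type=rcu rcuperf.gp_exp=0 rcuperf.shutdown=1
 */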

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;

static int rcu_perf_writer_state;
#define RTWS_INIT 0
#define RTWS_EXP_SYNC 1
#define RTWS_SYNC 2
#define RTWS_IDLE 3
#define RTWS_STOPPING 4

#define MAX_MEAS 10000
#define MIN_MEAS 100

#if defined(MODULE) || defined(CONFIG_RCU_PERF_TEST_RUNNABLE)
#define RCUPERF_RUNNABLE_INIT 1
#else
#define RCUPERF_RUNNABLE_INIT 0
#endif
static int perf_runnable = RCUPERF_RUNNABLE_INIT;
module_param(perf_runnable, int, 0444);
MODULE_PARM_DESC(perf_runnable, "Start rcuperf at boot");

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_perf_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*started)(void);
	unsigned long (*completed)(void);
	unsigned long (*exp_completed)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};

static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype = RCU_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = rcu_perf_read_lock,
	.readunlock = rcu_perf_read_unlock,
	.started = rcu_batches_started,
	.completed = rcu_batches_completed,
	.exp_completed = rcu_exp_batches_completed,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.name = "rcu"
};

/*
 * Definitions for rcu_bh perf testing.
 */

static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static struct rcu_perf_ops rcu_bh_ops = {
	.ptype = RCU_BH_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = rcu_bh_perf_read_lock,
	.readunlock = rcu_bh_perf_read_unlock,
	.started = rcu_batches_started_bh,
	.completed = rcu_batches_completed_bh,
	.exp_completed = rcu_exp_batches_completed_sched,
	.sync = synchronize_rcu_bh,
	.exp_sync = synchronize_rcu_bh_expedited,
	.name = "rcu_bh"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype = SRCU_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = srcu_perf_read_lock,
	.readunlock = srcu_perf_read_unlock,
	.started = NULL,
	.completed = srcu_perf_completed,
	.exp_completed = srcu_perf_completed,
	.sync = srcu_perf_synchronize,
	.exp_sync = srcu_perf_synchronize_expedited,
	.name = "srcu"
};

/*
 * Definitions for sched perf testing.
 */

static int sched_perf_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_perf_read_unlock(int idx)
{
	preempt_enable();
}

static struct rcu_perf_ops sched_ops = {
	.ptype = RCU_SCHED_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = sched_perf_read_lock,
	.readunlock = sched_perf_read_unlock,
	.started = rcu_batches_started_sched,
	.completed = rcu_batches_completed_sched,
	.exp_completed = rcu_exp_batches_completed_sched,
	.sync = synchronize_sched,
	.exp_sync = synchronize_sched_expedited,
	.name = "sched"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = tasks_perf_read_lock,
	.readunlock = tasks_perf_read_unlock,
	.started = rcu_no_completed,
	.completed = rcu_no_completed,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_tasks,
	.name = "tasks"
};

#define RCUPERF_TASKS_OPS &tasks_ops,

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

#else /* #ifdef CONFIG_TASKS_RCU */

#define RCUPERF_TASKS_OPS

static bool __maybe_unused torturing_tasks(void)
{
	return false;
}

#endif /* #else #ifdef CONFIG_TASKS_RCU */

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does empty RCU read-side
 * critical sections, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
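/*
 * Measurement protocol, as implemented below: each writer timestamps
 * every grace-period wait and stores the duration in its row of
 * writer_durations[].  Writers keep taking measurements until all of
 * them have collected at least MIN_MEAS samples, capping each writer
 * at MAX_MEAS.  The last writer to reach MIN_MEAS records the end time
 * and wakes up the shutdown kthread.
 */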
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp);
	WARN_ON(rcu_gp_is_normal() && gp_exp);
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_perf_writer_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_perf_writer_started =
				cur_ops->completed();
		}
	}

	do {
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_exp) {
			rcu_perf_writer_state = RTWS_EXP_SYNC;
			cur_ops->exp_sync();
		} else {
			rcu_perf_writer_state = RTWS_SYNC;
			cur_ops->sync();
		}
		rcu_perf_writer_state = RTWS_IDLE;
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			pr_alert("%s" PERF_FLAG
				 "rcu_perf_writer %ld has %d measurements\n",
				 perf_type, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_perf_writer_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_perf_writer_finished =
						cur_ops->completed();
				}
				smp_mb(); /* Assign before wake. */
				wake_up(&shutdown_wq);
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	rcu_perf_writer_state = RTWS_STOPPING;
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static inline void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	if (torture_cleanup_begin())
		return;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 b_rcu_perf_writer_finished -
			 b_rcu_perf_writer_started);
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					perf_type, PERF_FLAG,
					i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do flavor-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}
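
/*
 * rcu_perf_cleanup() emits one "writer N gps:" line per writer, one
 * overall summary line, and then every recorded grace-period duration
 * in nanoseconds, along these lines (values illustrative only):
 *
 *	rcu-perf: writer 0 gps: 103
 *	rcu-perf: start: 674921093 end: 1126621945 duration: 451700852 gps: 412 batches: 208
 *	rcu-perf:    0 writer-duration:     0 4376915
 */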

/*
 * Return the number if non-negative.  If negative, return the number of
 * online CPUs plus one plus the value, but at least one, so that -1 means
 * all CPUs and -2 means all but one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
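
/*
 * For example, with eight CPUs online, compute_real(4) == 4,
 * compute_real(-1) == 8, and compute_real(-3) == 6, while
 * compute_real(-16) is clamped to 1.
 */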

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down the system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_rcu_perf_writer_finished) >=
			   nrealwriters);
	} while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
		RCUPERF_TASKS_OPS
	};

	if (!torture_init_begin(perf_type, verbose, &perf_runnable))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
			 perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_alert(" %s", perf_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);