/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *  Ingo Molnar <mingo@redhat.com>
 *  Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as the base and add the (unstable) sched_clock() deltas on
 * top; the deltas are filtered to keep the clock monotonic and within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 2 jiffies difference).
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation; architectures and
 * sub-architectures can override it.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
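
/*
 * Note on the fallback above: it only advances in steps of
 * NSEC_PER_SEC / HZ (i.e. one jiffy, 1 ms at HZ=1000), and subtracting
 * INITIAL_JIFFIES makes the clock start near zero at boot instead of at
 * the deliberately pre-wrap value jiffies is initialized to.
 */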

static __read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

struct sched_clock_data {
	u64	tick_raw;
	u64	tick_gtod;
	u64	clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * min()/max() for u64, except they compare on the signed difference so
 * that wrap-around is taken into account.
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
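
/*
 * Example: if x has just wrapped past zero (x = 2) while y is still at
 * ULLONG_MAX, then x - y == 3 as u64, so (s64)(x - y) > 0 and wrap_max()
 * correctly picks x, whereas a plain max() on the u64 values would have
 * picked y.
 */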

/*
 * update the percpu scd from the raw value read from sched_clock()
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
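
/*
 * Worked example (assuming HZ=1000, so TICK_NSEC is roughly 1 ms): with
 * tick_gtod = 10 ms and old_clock = 10.5 ms the window is
 * [10.5 ms, 11 ms]. A raw delta of 2 ms (TSC running fast) gives
 * clock = 12 ms, clamped down to 11 ms; a raw delta of 0 gives
 * clock = 10 ms, clamped up to 10.5 ms. Either way the clock stays
 * monotonic and within about one tick of GTOD.
 */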

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;

	/*
	 * Use the opportunity that we have both clocks
	 * read to couple the two: we take the larger
	 * time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
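
/*
 * Note: if either clock was updated concurrently between the reads and
 * the store (e.g. by the remote cpu running sched_clock_local()), the
 * cmpxchg64() above fails and we retry with freshly read values, so no
 * update can be lost or go backward.
 */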

u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (sched_clock_stable)
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);

	return clock;
}
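
/*
 * sched_clock_cpu() requires interrupts to be disabled; a typical call
 * from scheduler context is:
 *
 *	u64 now = sched_clock_cpu(smp_processor_id());
 *
 * Callers that cannot guarantee irqs are off should use cpu_clock()
 * below, which wraps the call in local_irq_save()/restore().
 */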

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}
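
/*
 * Since sched_clock_tick() re-samples both tick_raw and tick_gtod every
 * timer tick, the clamp window used by sched_clock_local() is re-anchored
 * to GTOD once per tick; this is what keeps cpus within the couple of
 * jiffies of each other mentioned at the top of this file.
 */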

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
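
/*
 * This sleep/wakeup pair matters on cpus whose TSC stops in deep idle:
 * the sleep event records a final timestamp before the TSC halts, and
 * the wakeup event re-runs the tick logic so the idle period is picked
 * up from GTOD (via tick_gtod) rather than from the stalled raw clock.
 */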

unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
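
/*
 * cpu_clock() is the irq-safe accessor for callers that may have
 * interrupts enabled, e.g.:
 *
 *	unsigned long long t = cpu_clock(raw_smp_processor_id());
 *
 * It simply brackets sched_clock_cpu() with local_irq_save()/restore().
 */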

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

unsigned long long cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);