/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length.
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches field)
 *
 * On SMP systems this value is scaled up by 1 + log2 of the number
 * of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way systems,
 * 4x on 8-way systems, 5x on 16-way systems, etc.)
 */
const_debug unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * After fork, child runs first. (default) If set to 0 then
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 2 msec, units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 25 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;

unsigned int sysctl_sched_runtime_limit __read_mostly;

/*
 * Debugging: various feature bits
 */
enum {
        SCHED_FEAT_FAIR_SLEEPERS        = 1,
        SCHED_FEAT_SLEEPER_AVG          = 2,
        SCHED_FEAT_SLEEPER_LOAD_AVG     = 4,
        SCHED_FEAT_PRECISE_CPU_LOAD     = 8,
        SCHED_FEAT_START_DEBIT          = 16,
        SCHED_FEAT_SKIP_INITIAL         = 32,
};

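/*
 * Default feature mask: each SCHED_FEAT_* bit above is multiplied by
 * 0 or 1 below, so a feature is enabled or disabled simply by editing
 * its multiplier. With CONFIG_SCHED_DEBUG the mask is a variable and
 * can also be changed at runtime.
 */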
const_debug unsigned int sysctl_sched_features =
                SCHED_FEAT_FAIR_SLEEPERS        *1 |
                SCHED_FEAT_SLEEPER_AVG          *0 |
                SCHED_FEAT_SLEEPER_LOAD_AVG     *1 |
                SCHED_FEAT_PRECISE_CPU_LOAD     *1 |
                SCHED_FEAT_START_DEBIT          *1 |
                SCHED_FEAT_SKIP_INITIAL         *0;

extern struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* currently running entity (if any) on this cfs_rq */
static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
{
        return cfs_rq->curr;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline void
set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        cfs_rq->curr = se;
}

#else   /* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
{
        struct rq *rq = rq_of(cfs_rq);

        if (unlikely(rq->curr->sched_class != &fair_sched_class))
                return NULL;

        return &rq->curr->se;
}

#define entity_is_task(se)      1

static inline void
set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

/*
 * Enqueue an entity into the rb-tree:
 */
static inline void
__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        s64 key = se->fair_key;
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (key - entry->fair_key < 0) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
        update_load_add(&cfs_rq->load, se->load.weight);
        cfs_rq->nr_running++;
        se->on_rq = 1;

        schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}

static inline void
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node)
                cfs_rq->rb_leftmost = rb_next(&se->run_node);
        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
        update_load_sub(&cfs_rq->load, se->load.weight);
        cfs_rq->nr_running--;
        se->on_rq = 0;

        schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
        return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

/*
 * Calculate the preemption granularity needed to schedule every
 * runnable task once per sysctl_sched_latency amount of time.
 * (down to a sensible low limit on granularity)
 *
 * For example, if there are 2 tasks running and latency is 10 msecs,
 * we switch tasks every 5 msecs. If we have 3 tasks running, we have
 * to switch tasks every 3.33 msecs to get a 10 msecs observed latency
 * for each task. We do finer and finer scheduling until we
 * reach the minimum granularity value.
 *
 * To achieve this we use the following dynamic-granularity rule:
 *
 *    gran = lat/nr - lat/nr/nr
 *
 * This comes out of the following equations:
 *
 *    kA1 + gran = kB1
 *    kB2 + gran = kA2
 *    kA2 = kA1
 *    kB2 = kB1 - d + d/nr
 *    lat = d * nr
 *
 * Where 'k' is key, 'A' is task A (waiting), 'B' is task B (running),
 * '1' is start of time, '2' is end of time, 'd' is delay between
 * 1 and 2 (during which task B was running), 'nr' is number of tasks
 * running, 'lat' is the period of each task. ('lat' is the
 * sched_latency that we aim for.)
 */
static long
sched_granularity(struct cfs_rq *cfs_rq)
{
        unsigned int gran = sysctl_sched_latency;
        unsigned int nr = cfs_rq->nr_running;

        if (nr > 1) {
                gran = gran/nr - gran/nr/nr;
                gran = max(gran, sysctl_sched_min_granularity);
        }

        return gran;
}
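
/*
 * Illustration (plain arithmetic on the defaults, before any SMP
 * scaling of the latency target): with sysctl_sched_latency at
 * 20 msec and nr_running == 4, the rule above gives
 * gran = 20/4 - 20/4/4 = 5 - 1.25 = 3.75 msec. The 2 msec
 * sysctl_sched_min_granularity floor only starts to apply at around
 * nine or more runnable tasks.
 */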

/*
 * We rescale the rescheduling granularity of tasks according to their
 * nice level, but only linearly, not exponentially:
 */
static long
niced_granularity(struct sched_entity *curr, unsigned long granularity)
{
        u64 tmp;

        if (likely(curr->load.weight == NICE_0_LOAD))
                return granularity;
        /*
         * Positive nice level tasks get linearly finer
         * granularity:
         */
        if (likely(curr->load.weight < NICE_0_LOAD)) {
                tmp = curr->load.weight * (u64)granularity;
                return (long) (tmp >> NICE_0_SHIFT);
        }
        /*
         * Negative nice level tasks get linearly finer
         * granularity:
         */
        tmp = curr->load.inv_weight * (u64)granularity;

        /*
         * It will always fit into 'long':
         */
        return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
}
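
/*
 * Example of the scaling above (approximate figures from the
 * nice-to-weight table): a nice +5 task weighs roughly a third of
 * NICE_0_LOAD, so its effective granularity is about a third of the
 * nice-0 value; a nice -5 task weighs roughly three times NICE_0_LOAD
 * and likewise ends up with about a third of the nice-0 granularity,
 * via the inv_weight path.
 */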

static inline void
limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        long limit = sysctl_sched_runtime_limit;

        /*
         * Niced tasks have the same history dynamic range as
         * non-niced tasks:
         */
        if (unlikely(se->wait_runtime > limit)) {
                se->wait_runtime = limit;
                schedstat_inc(se, wait_runtime_overruns);
                schedstat_inc(cfs_rq, wait_runtime_overruns);
        }
        if (unlikely(se->wait_runtime < -limit)) {
                se->wait_runtime = -limit;
                schedstat_inc(se, wait_runtime_underruns);
                schedstat_inc(cfs_rq, wait_runtime_underruns);
        }
}

static inline void
__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
        se->wait_runtime += delta;
        schedstat_add(se, sum_wait_runtime, delta);
        limit_wait_runtime(cfs_rq, se);
}

static void
add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
        schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
        __add_wait_runtime(cfs_rq, se, delta);
        schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
{
        unsigned long delta, delta_fair, delta_mine;
        struct load_weight *lw = &cfs_rq->load;
        unsigned long load = lw->weight;

        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

        curr->sum_exec_runtime += delta_exec;
        cfs_rq->exec_clock += delta_exec;

        if (unlikely(!load))
                return;

        delta_fair = calc_delta_fair(delta_exec, lw);
        delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);

        if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
                delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
                delta = min(delta, (unsigned long)(
                        (long)sysctl_sched_runtime_limit - curr->wait_runtime));
                cfs_rq->sleeper_bonus -= delta;
                delta_mine -= delta;
        }

        cfs_rq->fair_clock += delta_fair;
        /*
         * We executed delta_exec amount of time on the CPU,
         * but we were only entitled to delta_mine amount of
         * time during that period (if nr_running == 1 then
         * the two values are equal)
         * [Note: delta_mine - delta_exec is negative]:
         */
        add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
}
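
/*
 * Rough illustration of the accounting above: with two runnable nice-0
 * tasks, delta_mine is about half of delta_exec, so the running task's
 * wait_runtime drops by roughly delta_exec/2 while the fair clock (and
 * with it the waiting task's entitlement) advances by the same amount.
 */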

static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq_curr(cfs_rq);
        u64 now = rq_of(cfs_rq)->clock;
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        se->wait_start_fair = cfs_rq->fair_clock;
        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * We calculate fair deltas here, so protect against the random effects
 * of a multiplication overflow by capping it to the runtime limit:
 */
#if BITS_PER_LONG == 32
static inline unsigned long
calc_weighted(unsigned long delta, unsigned long weight, int shift)
{
        u64 tmp = (u64)delta * weight >> shift;

        if (unlikely(tmp > sysctl_sched_runtime_limit*2))
                return sysctl_sched_runtime_limit*2;
        return tmp;
}
#else
static inline unsigned long
calc_weighted(unsigned long delta, unsigned long weight, int shift)
{
        return delta * weight >> shift;
}
#endif
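
/*
 * Note: for a nice-0 entity calc_weighted() is an identity operation,
 * since NICE_0_LOAD >> NICE_0_SHIFT == 1; callers therefore only pass
 * non nice-0 weights through it.
 */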

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        s64 key;

        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq_curr(cfs_rq))
                update_stats_wait_start(cfs_rq, se);
        /*
         * Update the key:
         */
        key = cfs_rq->fair_clock;

        /*
         * Optimize the common nice 0 case:
         */
        if (likely(se->load.weight == NICE_0_LOAD)) {
                key -= se->wait_runtime;
        } else {
                u64 tmp;

                if (se->wait_runtime < 0) {
                        tmp = -se->wait_runtime;
                        key += (tmp * se->load.inv_weight) >>
                                        (WMULT_SHIFT - NICE_0_SHIFT);
                } else {
                        tmp = se->wait_runtime;
                        key -= (tmp * se->load.inv_weight) >>
                                        (WMULT_SHIFT - NICE_0_SHIFT);
                }
        }

        se->fair_key = key;
}

/*
 * Note: must be called with a freshly updated rq->fair_clock.
 */
static inline void
__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
                        unsigned long delta_fair)
{
        schedstat_set(se->wait_max, max(se->wait_max,
                        rq_of(cfs_rq)->clock - se->wait_start));

        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta_fair = calc_weighted(delta_fair, se->load.weight,
                                                        NICE_0_SHIFT);

        add_wait_runtime(cfs_rq, se, delta_fair);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        unsigned long delta_fair;

        if (unlikely(!se->wait_start_fair))
                return;

        delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
                        (u64)(cfs_rq->fair_clock - se->wait_start_fair));

        __update_stats_wait_end(cfs_rq, se, delta_fair);

        se->wait_start_fair = 0;
        schedstat_set(se->wait_start, 0);
}


static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_curr(cfs_rq);
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq_curr(cfs_rq))
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_of(cfs_rq)->clock;
}

/*
 * We are descheduling a task - update its stats:
 */
static inline void
update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        se->exec_start = 0;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
                              unsigned long delta_fair)
{
        unsigned long load = cfs_rq->load.weight;
        long prev_runtime;

        /*
         * Do not boost sleepers if there's too much bonus 'in flight'
         * already:
         */
        if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
                return;

        if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
                load = rq_of(cfs_rq)->cpu_load[2];

        /*
         * Fix up delta_fair with the effect of us running
         * during the whole sleep period:
         */
        if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
                delta_fair = div64_likely32((u64)delta_fair * load,
                                                load + se->load.weight);

        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta_fair = calc_weighted(delta_fair, se->load.weight,
                                                        NICE_0_SHIFT);

        prev_runtime = se->wait_runtime;
        __add_wait_runtime(cfs_rq, se, delta_fair);
        delta_fair = se->wait_runtime - prev_runtime;

        /*
         * Track the amount of bonus we've given to sleepers:
         */
        cfs_rq->sleeper_bonus += delta_fair;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct task_struct *tsk = task_of(se);
        unsigned long delta_fair;

        if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
                        !(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
                return;

        delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
                (u64)(cfs_rq->fair_clock - se->sleep_start_fair));

        __enqueue_sleeper(cfs_rq, se, delta_fair);

        se->sleep_start_fair = 0;

#ifdef CONFIG_SCHEDSTATS
        if (se->sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->sleep_max))
                        se->sleep_max = delta;

                se->sleep_start = 0;
                se->sum_sleep_runtime += delta;
        }
        if (se->block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->block_max))
                        se->block_max = delta;

                se->block_start = 0;
                se->sum_sleep_runtime += delta;

                /*
                 * Blocking time is in units of nanosecs, so shift by 20 to
                 * get a milliseconds-range estimation of the amount of
                 * time that the task spent sleeping:
                 */
                if (unlikely(prof_on == SLEEP_PROFILING)) {
                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
                                     delta >> 20);
                }
        }
#endif
}

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
        /*
         * Update the fair clock.
         */
        update_curr(cfs_rq);

        if (wakeup)
                enqueue_sleeper(cfs_rq, se);

        update_stats_enqueue(cfs_rq, se);
        __enqueue_entity(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
                se->sleep_start_fair = cfs_rq->fair_clock;
#ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);

                        if (tsk->state & TASK_INTERRUPTIBLE)
                                se->sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
                                se->block_start = rq_of(cfs_rq)->clock;
                }
#endif
        }
        __dequeue_entity(cfs_rq, se);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
                          struct sched_entity *curr, unsigned long granularity)
{
        s64 __delta = curr->fair_key - se->fair_key;
        unsigned long ideal_runtime, delta_exec;

        /*
         * ideal_runtime is compared against sum_exec_runtime, which is
         * walltime, hence do not scale.
         */
        ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
                        (unsigned long)sysctl_sched_min_granularity);

        /*
         * If we executed more than what the latency constraint suggests,
         * reduce the rescheduling granularity. This way the total time a
         * task can go without being scheduled converges to
         * sysctl_sched_latency:
         */
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime)
                granularity = 0;

        /*
         * Take scheduling granularity into account - do not
         * preempt the current task unless the best task has
         * a larger than sched_granularity fairness advantage:
         *
         * scale granularity as key space is in fair_clock.
         */
        if (__delta > niced_granularity(curr, granularity))
                resched_task(rq_of(cfs_rq)->curr);
}
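
/*
 * Concrete numbers for the check above, using example defaults: with a
 * 20 msec latency target and 4 runnable tasks, ideal_runtime is
 * max(20/4, 2) = 5 msec. Once the current task has run for more than
 * 5 msec since it was last picked, the granularity argument is dropped
 * to 0 and any positive fair_key advantage of the waiting task is
 * enough to trigger a reschedule.
 */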

static inline void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Any task has to be enqueued before it gets to execute on
         * a CPU. So account for the time it spent waiting on the
         * runqueue. (note, here we rely on pick_next_task() having
         * done a put_prev_task_fair() shortly before this, which
         * updated rq->fair_clock - used by update_stats_wait_end())
         */
        update_stats_wait_end(cfs_rq, se);
        update_stats_curr_start(cfs_rq, se);
        set_cfs_rq_curr(cfs_rq, se);
#ifdef CONFIG_SCHEDSTATS
        /*
         * Track our maximum slice length, if the CPU's load is at
         * least twice that of our own weight (i.e. don't track it
         * when there are only lesser-weight tasks around):
         */
        if (rq_of(cfs_rq)->ls.load.weight >= 2*se->load.weight) {
                se->slice_max = max(se->slice_max,
                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
        }
#endif
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct sched_entity *se = __pick_next_entity(cfs_rq);

        set_next_entity(cfs_rq, se);

        return se;
}


static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
        /*
         * If still on the runqueue then deactivate_task()
         * was not called and update_curr() has to be done:
         */
        if (prev->on_rq)
                update_curr(cfs_rq);

        update_stats_curr_end(cfs_rq, prev);

        if (prev->on_rq)
                update_stats_wait_start(cfs_rq, prev);
        set_cfs_rq_curr(cfs_rq, NULL);
}

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        struct sched_entity *next;

        /*
         * Dequeue and enqueue the task to update its
         * position within the tree:
         */
        dequeue_entity(cfs_rq, curr, 0);
        enqueue_entity(cfs_rq, curr, 0);

        /*
         * Reschedule if another task tops the current one.
         */
        next = __pick_next_entity(cfs_rq);
        if (next == curr)
                return;

        __check_preempt_curr_fair(cfs_rq, next, curr,
                        sched_granularity(cfs_rq));
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        /* A later patch will take group into account */
        return &cpu_rq(this_cpu)->cfs;
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) tasks belong to the same group? */
static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
        if (curr->se.cfs_rq == p->se.cfs_rq)
                return 1;

        return 0;
}

#else   /* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
        return 1;
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                if (se->on_rq)
                        break;
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, wakeup);
        }
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, sleep);
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight)
                        break;
        }
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq, struct task_struct *p)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(p);
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct sched_entity *rightmost, *se = &p->se;
        struct rb_node *parent;

        /*
         * Are we the only task in the tree?
         */
        if (unlikely(cfs_rq->nr_running == 1))
                return;

        if (likely(!sysctl_sched_compat_yield)) {
                __update_rq_clock(rq);
                /*
                 * Dequeue and enqueue the task to update its
                 * position within the tree:
                 */
                dequeue_entity(cfs_rq, &p->se, 0);
                enqueue_entity(cfs_rq, &p->se, 0);

                return;
        }
        /*
         * Find the rightmost entry in the rbtree:
         */
        do {
                parent = *link;
                link = &parent->rb_right;
        } while (*link);

        rightmost = rb_entry(parent, struct sched_entity, run_node);
        /*
         * Already in the rightmost position?
         */
        if (unlikely(rightmost == se))
                return;

        /*
         * Minimally necessary key value to be last in the tree:
         */
        se->fair_key = rightmost->fair_key + 1;

        if (cfs_rq->rb_leftmost == &se->run_node)
                cfs_rq->rb_leftmost = rb_next(&se->run_node);
        /*
         * Relink the task to the rightmost position:
         */
        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
{
        struct task_struct *curr = rq->curr;
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        unsigned long gran;

        if (unlikely(rt_prio(p->prio))) {
                update_rq_clock(rq);
                update_curr(cfs_rq);
                resched_task(curr);
                return;
        }

        gran = sysctl_sched_wakeup_granularity;
        /*
         * Batch tasks prefer throughput over latency:
         */
        if (unlikely(p->policy == SCHED_BATCH))
                gran = sysctl_sched_batch_wakeup_granularity;

        if (is_same_group(curr, p))
                __check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
        struct cfs_rq *cfs_rq = &rq->cfs;
        struct sched_entity *se;

        if (unlikely(!cfs_rq->nr_running))
                return NULL;

        do {
                se = pick_next_entity(cfs_rq);
                cfs_rq = group_cfs_rq(se);
        } while (cfs_rq);

        return task_of(se);
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
        struct sched_entity *se = &prev->se;
        struct cfs_rq *cfs_rq;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                put_prev_entity(cfs_rq, se);
        }
}

/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static inline struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
        struct task_struct *p;

        if (!curr)
                return NULL;

        p = rb_entry(curr, struct task_struct, se.run_node);
        cfs_rq->rb_load_balance_curr = rb_next(curr);

        return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
        struct cfs_rq *cfs_rq = arg;

        return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
        struct cfs_rq *cfs_rq = arg;

        return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr;
        struct task_struct *p;

        if (!cfs_rq->nr_running)
                return MAX_PRIO;

        curr = __pick_next_entity(cfs_rq);
        p = task_of(curr);

        return p->prio;
}
#endif

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_nr_move, unsigned long max_load_move,
                  struct sched_domain *sd, enum cpu_idle_type idle,
                  int *all_pinned, int *this_best_prio)
{
        struct cfs_rq *busy_cfs_rq;
        unsigned long load_moved, total_nr_moved = 0, nr_moved;
        long rem_load_move = max_load_move;
        struct rq_iterator cfs_rq_iterator;

        cfs_rq_iterator.start = load_balance_start_fair;
        cfs_rq_iterator.next = load_balance_next_fair;

        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
                struct cfs_rq *this_cfs_rq;
                long imbalance;
                unsigned long maxload;

                this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

                imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
                /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
                if (imbalance <= 0)
                        continue;

                /* Don't pull more than imbalance/2 */
                imbalance /= 2;
                maxload = min(rem_load_move, imbalance);

                *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
                /* pass busy_cfs_rq argument into
                 * load_balance_[start|next]_fair iterators
                 */
                cfs_rq_iterator.arg = busy_cfs_rq;
                nr_moved = balance_tasks(this_rq, this_cpu, busiest,
                                max(max_nr_move, maxload, sd, idle, all_pinned,
                                &load_moved, this_best_prio, &cfs_rq_iterator);

                total_nr_moved += nr_moved;
                max_nr_move -= nr_moved;
                rem_load_move -= load_moved;

                if (max_nr_move <= 0 || rem_load_move <= 0)
                        break;
        }

        return max_load_move - rem_load_move;
}

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &curr->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                entity_tick(cfs_rq, se);
        }
}

/*
 * Share the fairness runtime between parent and child, so that the
 * total amount of CPU pressure stays the same - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(p);
        struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);

        sched_info_queued(p);

        update_curr(cfs_rq);
        update_stats_enqueue(cfs_rq, se);
        /*
         * Child runs first: we let it run before the parent
         * until it reschedules once. We set up the key so that
         * it will preempt the parent:
         */
        se->fair_key = curr->fair_key -
                niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
        /*
         * The first wait is dominated by the child-runs-first logic,
         * so do not credit it with that waiting time yet:
         */
        if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
                se->wait_start_fair = 0;

        /*
         * The statistical average of wait_runtime is about
         * -granularity/2, so initialize the task with that:
         */
        if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
                se->wait_runtime = -(sched_granularity(cfs_rq) / 2);

        __enqueue_entity(cfs_rq, se);
        resched_task(rq->curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
        struct sched_entity *se = &rq->curr->se;

        for_each_sched_entity(se)
                set_next_entity(cfs_rq_of(se), se);
}
#else
static void set_curr_task_fair(struct rq *rq)
{
}
#endif

/*
 * All the scheduling class methods:
 */
struct sched_class fair_sched_class __read_mostly = {
        .enqueue_task           = enqueue_task_fair,
        .dequeue_task           = dequeue_task_fair,
        .yield_task             = yield_task_fair,

        .check_preempt_curr     = check_preempt_curr_fair,

        .pick_next_task         = pick_next_task_fair,
        .put_prev_task          = put_prev_task_fair,

        .load_balance           = load_balance_fair,

        .set_curr_task          = set_curr_task_fair,
        .task_tick              = task_tick_fair,
        .task_new               = task_new_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
        struct cfs_rq *cfs_rq;

        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
                print_cfs_rq(m, cpu, cfs_rq);
}
#endif