/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	William Irwin, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
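
/*
 * Sizing note (illustrative, assuming PAGE_SIZE == 4096): each struct
 * profile_hit is 8 bytes, so one page holds NR_PROFILE_HIT = 4096/8 =
 * 512 entries, organised as NR_PROFILE_GRP = 512/8 = 64 groups of
 * PROFILE_GRPSZ = 8 slots that are scanned together on each probe.
 */
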
/* Oprofile timer tick hook */
static int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

int profile_setup(char *str)
{
	static char schedstr[] = "schedule";
	static char sleepstr[] = "sleep";
	static char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
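
/*
 * Boot-time usage sketch (see Documentation/kernel-parameters.txt for
 * the authoritative syntax):
 *
 *	profile=2		CPU-time profiling; each histogram
 *				bucket covers 1 << 2 = 4 bytes of text
 *	profile=schedule,5	count schedule() calls instead
 *	profile=sleep,10	sleep profiling (CONFIG_SCHEDSTATS only)
 *	profile=kvm		profile KVM guest exits
 */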


int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

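	/*
	 * The buffer scales with kernel text size and may be too big
	 * for kmalloc: try kzalloc, then fall back to whole pages,
	 * then to vmalloc.
	 */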
	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vmalloc(buffer_bytes);
	if (prof_buffer) {
		memset(prof_buffer, 0, buffer_bytes);
		return 0;
	}

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

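/*
 * Registration sketch (hypothetical client; the callback and
 * notifier_block names below are illustrative, not part of this file):
 *
 *	static int my_exit_notify(struct notifier_block *nb,
 *				  unsigned long val, void *data)
 *	{
 *		struct task_struct *task = data;
 *		pr_info("task %d exiting\n", task->pid);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_exit_nb = {
 *		.notifier_call = my_exit_notify,
 *	};
 *
 *	profile_event_register(PROFILE_TASK_EXIT, &my_exit_nb);
 *	...
 *	profile_event_unregister(PROFILE_TASK_EXIT, &my_exit_nb);
 */
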
int register_timer_hook(int (*hook)(struct pt_regs *))
{
	if (timer_hook)
		return -EBUSY;
	timer_hook = hook;
	return 0;
}
EXPORT_SYMBOL_GPL(register_timer_hook);

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(unregister_timer_hook);

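/*
 * Hook usage sketch (modelled on oprofile's timer mode; the function
 * name is hypothetical). The hook runs in interrupt context, once per
 * timer tick, and only one hook may be registered at a time:
 *
 *	static int my_timer_notify(struct pt_regs *regs)
 *	{
 *		...record a sample for regs...
 *		return 0;
 *	}
 *
 *	if (register_timer_hook(my_timer_notify))
 *		return -EBUSY;
 *	...
 *	unregister_timer_hook(my_timer_notify);
 */
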
#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
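/*
 * Probe-sequence note (derived from the macros above): "secondary" in
 * profile_hits() is always an odd multiple of PROFILE_GRPSZ, since
 * pc << 1 has bit 0 clear and its complement therefore has bit 0 set.
 * Stepping by an odd number of groups modulo the table size visits
 * every group exactly once before returning to "primary", so the
 * do/while probe loop below terminates after at most one full scan.
 */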
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	if (prof_on != type || !prof_buffer)
		return;
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int __cpuinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_mem(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return notifier_from_errno(-ENOMEM);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return notifier_from_errno(-ENOMEM);
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	if (prof_on != type || !prof_buffer)
		return;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

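/*
 * Call-site note (illustrative): architecture timer interrupt code is
 * expected to invoke profile_tick(CPU_PROFILING) once per tick;
 * profile_hit() is the single-hit inline wrapper around profile_hits()
 * from <linux/profile.h>.
 */
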
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, prof_cpu_mask);
	seq_putc(m, '\n');
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

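/*
 * Shell-level sketch: the mask is parsed by cpumask_parse_user() as
 * hexadecimal, so restricting profiling to cpus 0-1 looks like:
 *
 *	echo 3 > /proc/irq/prof_cpu_mask
 */
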
static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("prof_cpu_mask", 0600, root_irq_dir, &prof_cpu_mask_proc_fops);
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

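/*
 * Consumer sketch (readprofile(8) is the canonical reader; this
 * assumes, as the code above does, that sizeof(atomic_t) equals
 * sizeof(unsigned int)): the file is (prof_len + 1) words -- word 0
 * is the sample step (1 << prof_shift), and word i+1 counts hits in
 * the text range [_stext + i*step, _stext + (i+1)*step):
 *
 *	unsigned int step, count;
 *	FILE *f = fopen("/proc/profile", "r");
 *	fread(&step, sizeof(step), 1, f);
 *	while (fread(&count, sizeof(count), 1, f) == 1)
 *		;	-- accumulate per-bucket counts here
 */
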
/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

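/*
 * Reset sketch: any write clears the histogram, e.g. from a shell,
 *
 *	echo > /proc/profile
 *
 * while a write of exactly sizeof(int) bytes on SMP also passes the
 * value to the architecture's setup_profiling_timer().
 */
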
static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};

#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_mem(cpu);
		struct page *page;

		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif

int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	entry->size = (1+prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */