/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
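
/*
 * As a purely illustrative sketch (the "foo_bar" event below is
 * hypothetical and not defined anywhere in the kernel), a definition
 * such as:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar, const char *name),
 *		TP_ARGS(bar, name),
 *		TP_STRUCT__entry(
 *			__field(	int,	bar	)
 *			__string(	name,	name	)
 *		),
 *		TP_fast_assign(
 *			__entry->bar	= bar;
 *			__assign_str(name, name);
 *		),
 *		TP_printk("bar=%d name=%s", __entry->bar, __get_str(name))
 *	);
 *
 * would be turned by this stage into:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			bar;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 */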

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, TP_PROTO(proto), TP_ARGS(args),	\
		TP_STRUCT__entry(tstruct),			\
		TP_fast_assign(assign),				\
		TP_printk(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>, which holds
 * the offset of the corresponding array from the beginning of the event.
 * The size of the array is also encoded, in the high 16 bits of <item>.
 */
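
/*
 * Continuing the hypothetical foo_bar example from stage 1, its only
 * dynamic field is the string "name", so this stage would produce
 * roughly:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * where the low 16 bits of "name" hold the offset of the string from
 * the beginning of the event and the high 16 bits hold its length.
 */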

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the format output of the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 *
 * }
 */
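
/*
 * For the hypothetical foo_bar event, the generated function would
 * print one line per field, something along the lines of (the offsets
 * below depend on the layout of struct trace_entry and are only
 * illustrative):
 *
 *	field:int bar;	offset:12;	size:4;
 *	field:__data_loc char[] name;	offset:16;	size:4;
 *
 *	print fmt: "bar=%d name=%s", REC->bar, __get_str(name)
 */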

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"	\
			       "offset:%u;\tsize:%u;\n",			\
			       (unsigned int)offsetof(typeof(field), item),	\
			       (unsigned int)sizeof(field.item));		\
	if (!ret)								\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\n",		       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field;	<-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note that this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
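
/*
 * Both helpers are meant to be used inside TP_printk(). A hypothetical
 * use (not taken from any real event) could look like:
 *
 *	TP_printk("state=%s flags=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "RUNNING" }, { 1, "SLEEPING" }),
 *		  __print_flags(__entry->flags, "|",
 *				{ 1, "LOW" }, { 2, "HIGH" }))
 */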

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
enum print_line_t							\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
int									\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event,
 * and compute the total size of the dynamic data for the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}
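
/*
 * For the hypothetical foo_bar event, the helper generated here would
 * expand to roughly:
 *
 *	static inline int ftrace_get_offsets_foo_bar(
 *		struct ftrace_data_offsets_foo_bar *__data_offsets,
 *		int bar, const char *name)
 *	{
 *		int __data_size = 0;
 *		struct ftrace_raw_foo_bar __maybe_unused *entry;
 *
 *		__data_offsets->name = __data_size +
 *				       offsetof(typeof(*entry), __data);
 *		__data_offsets->name |= ((strlen(name) + 1) * sizeof(char)) << 16;
 *		__data_size += (strlen(name) + 1) * sizeof(char);
 *
 *		return __data_size;
 *	}
 */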

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 * 	int ret = 0;
 *
 * 	if (!atomic_inc_return(&event_call->profile_count))
 * 		ret = register_trace_<call>(ftrace_profile_<call>);
 *
 * 	return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 * 	if (atomic_add_negative(-1, &event_call->profile_count))
 * 		unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *		       __array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_raw_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
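
/*
 * In the function generated below, tstruct and { assign; } expand via
 * the macros above. For the hypothetical foo_bar event this would
 * roughly become:
 *
 *	entry->__data_loc_name = __data_offsets.name;	<-- tstruct
 *	{
 *		entry->bar = bar;			<-- assign
 *		strcpy(__get_str(name), name);
 *	}
 */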

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(event_call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void *ptr)			\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void *ptr)			\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	init_preds(&event_##call);					\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,		\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback used to profile events.
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert into the ring buffer but into a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	int __entry_size;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	do {
 *		char raw_data[__entry_size]; <- allocate our sample on the stack
 *		struct trace_entry *ent;
 *
 *		zero dead bytes from alignment to avoid stack leak to userspace:
 *
 *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *		entry = (struct ftrace_raw_<call> *)raw_data;
 *		ent = &entry->ent;
 *		tracing_generic_entry_update(ent, irq_flags, pc);
 *		ent->type = event_call->id;
 *
 *		<tstruct> <- store the dynamic array offsets
 *
 *		<assign>  <- assign our values
 *
 *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
 *			     __entry_size); <- submit them to the perf counter
 *	} while (0);
 *
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	int __entry_size;						\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	do {								\
		char raw_data[__entry_size];				\
		struct trace_entry *ent;				\
									\
		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
		entry = (struct ftrace_raw_##call *)raw_data;		\
		ent = &entry->ent;					\
		tracing_generic_entry_update(ent, irq_flags, pc);	\
		ent->type = event_call->id;				\
									\
		tstruct							\
									\
		{ assign; }						\
									\
		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
			     __entry_size);				\
	} while (0);							\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT
