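/*
 * Perf side of the TRACE_EVENT() machinery: pulled in by the tracepoint
 * definition headers when CONFIG_PERF_EVENTS is enabled, this file
 * generates the perf_trace_<event>() handlers that feed trace events to
 * perf and to any BPF program attached to the event.
 */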

#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

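/*
 * A __data_loc_<field> word packs the field's offset from the start of
 * the entry in its low 16 bits and the field's length in its high 16
 * bits; the accessors below resolve dynamic arrays, strings and
 * bitmasks directly against the raw perf entry.
 */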
#undef __get_dynamic_array
#define __get_dynamic_array(field)					\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)					\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

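/*
 * __perf_count() and __perf_task() let an event override the count a
 * sample contributes and the task it is attributed to; the handler
 * below defaults them to a count of 1 and a NULL task.
 */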
#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))

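/*
 * For each event class, DECLARE_EVENT_CLASS() expands to a
 * perf_trace_<call>() handler that sizes the record, grabs a per-cpu
 * perf trace buffer, fills in the fields, optionally runs an attached
 * BPF program as a filter, and submits the record to the perf ring
 * buffer.
 */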
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct trace_event_call *event_call = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_raw_##call *entry;				\
	struct bpf_prog *prog = event_call->prog;			\
	struct pt_regs *__regs;						\
	u64 __count = 1;						\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
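	/*								\
	 * Skip the event when no BPF program is attached, the event	\
	 * does not redirect to another task via __perf_task() and no	\
	 * perf event is registered on this CPU.			\
	 */								\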
	head = this_cpu_ptr(event_call->perf_events);			\
	if (!prog && __builtin_constant_p(!__task) && !__task &&	\
				hlist_empty(head))			\
		return;							\
									\
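	/*								\
	 * Size the record so that it plus the u32 size header that	\
	 * perf prepends to raw samples stays u64 aligned.		\
	 */								\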
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
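	/*								\
	 * Grab a per-cpu perf trace buffer and a recursion context,	\
	 * then capture the caller's registers into __regs.		\
	 */								\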
	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	perf_fetch_caller_regs(__regs);					\
									\
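	/*								\
	 * tstruct sets up the __data_loc words for dynamic fields;	\
	 * { assign; } fills in the rest of the record from the	\
	 * tracepoint arguments.					\
	 */								\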
	tstruct								\
									\
	{ assign; }							\
									\
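	/*								\
	 * If a BPF program is attached to this event, stash the saved	\
	 * regs at the head of the record and let the program filter	\
	 * the event; drop the record if the program rejects it or no	\
	 * perf event is listening.					\
	 */								\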
	if (prog) {							\
		*(struct pt_regs **)entry = __regs;			\
		if (!trace_call_bpf(prog, entry) || hlist_empty(head)) { \
			perf_swevent_put_recursion_context(rctx);	\
			return;						\
		}							\
	}								\
	perf_trace_buf_submit(entry, __entry_size, rctx,		\
			      event_call->event.type, __count, __regs,	\
			      head, __task);				\
}

/*
 * This part is compiled out; it is only here as a build-time check to
 * make sure that if the tracepoint handling changes, the perf probe
 * will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

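/*
 * Re-include the trace header so the TRACE_EVENT() and
 * DECLARE_EVENT_CLASS() invocations in it expand with the perf
 * definitions above.
 */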
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */