/*
 * Perf-events probe generation stage of the TRACE_EVENT() macro expansion:
 * redefines the trace macros so that re-including the trace header emits a
 * perf_trace_<event>() probe for each event class (include/trace/perf.h).
 */
Steven Rostedt (Red Hat)ee53bbd2015-04-29 13:11:00 -04001
2#undef TRACE_SYSTEM_VAR
3
4#ifdef CONFIG_PERF_EVENTS
5
6#undef __entry
7#define __entry entry
8
9#undef __get_dynamic_array
10#define __get_dynamic_array(field) \
11 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
12
13#undef __get_dynamic_array_len
14#define __get_dynamic_array_len(field) \
15 ((__entry->__data_loc_##field >> 16) & 0xffff)
16
17#undef __get_str
Daniel Bristot de Oliveira934de5f2016-07-01 20:44:34 -030018#define __get_str(field) ((char *)__get_dynamic_array(field))
Steven Rostedt (Red Hat)ee53bbd2015-04-29 13:11:00 -040019
20#undef __get_bitmask
21#define __get_bitmask(field) (char *)__get_dynamic_array(field)
22
Steven Rostedt (Red Hat)ee53bbd2015-04-29 13:11:00 -040023#undef __perf_count
24#define __perf_count(c) (__count = (c))
25
26#undef __perf_task
27#define __perf_task(t) (__task = (t))
28
/*
 * For each event class, generate perf_trace_<call>(): the probe attached
 * to the tracepoint when perf (or BPF) is using it.  It computes the
 * dynamic-data size, bails out early when nobody is listening, allocates
 * a perf trace buffer entry, fills it via the event's tstruct/assign
 * logic, and submits it through perf_trace_run_bpf_submit().
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct trace_event_call *event_call = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_raw_##call *entry;				\
	struct bpf_prog *prog = event_call->prog;			\
	struct pt_regs *__regs;						\
	u64 __count = 1;						\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	/* Fast exit: no BPF program and no perf event on this CPU. */	\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (!prog && __builtin_constant_p(!__task) && !__task &&	\
	    hlist_empty(head))						\
		return;							\
									\
	/* u64-align the entry, accounting for the extra u32 that the	\
	 * perf buffer layout reserves (see perf_trace_buf_alloc()). */	\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	perf_fetch_caller_regs(__regs);					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_run_bpf_submit(entry, __entry_size, rctx,		\
				  event_call, __count, __regs,		\
				  head, __task);			\
}
71
72/*
73 * This part is compiled out, it is only here as a build time check
74 * to make sure that if the tracepoint handling changes, the
75 * perf probe will fail to compile unless it too is updated.
76 */
77#undef DEFINE_EVENT
78#define DEFINE_EVENT(template, call, proto, args) \
79static inline void perf_test_probe_##call(void) \
80{ \
81 check_trace_callback_type_##call(perf_trace_##template); \
82}
83
84
85#undef DEFINE_EVENT_PRINT
86#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
87 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
88
89#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
90#endif /* CONFIG_PERF_EVENTS */