/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *        struct trace_entry      ent;
 *        <type>                  <item>;
 *        <type2>                 <item2>[<len>];
 *        [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
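
/*
 * As a purely illustrative sketch (foo_bar is a hypothetical event, not
 * one defined anywhere in the kernel), a tracepoint declared as:
 *
 *        TRACE_EVENT(foo_bar,
 *                TP_PROTO(const char *name, int value),
 *                TP_ARGS(name, value),
 *                TP_STRUCT__entry(
 *                        __string(       name,   name    )
 *                        __field(        int,    value   )
 *                ),
 *                TP_fast_assign(
 *                        __assign_str(name, name);
 *                        __entry->value = value;
 *                ),
 *                TP_printk("name=%s value=%d", __get_str(name), __entry->value)
 *        );
 *
 * would be turned by this stage into roughly:
 *
 *        struct ftrace_raw_foo_bar {
 *                struct trace_entry      ent;
 *                u32                     __data_loc_name;
 *                int                     value;
 *                char                    __data[0];
 *        };
 */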

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
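
/*
 * For illustration only (again hypothetical, not events of this file):
 * two tracepoints sharing one prototype could be written as
 *
 *        DECLARE_EVENT_CLASS(foo_template,
 *                TP_PROTO(int value),
 *                TP_ARGS(value),
 *                TP_STRUCT__entry(
 *                        __field(        int,    value   )
 *                ),
 *                TP_fast_assign(
 *                        __entry->value = value;
 *                ),
 *                TP_printk("value=%d", __entry->value)
 *        );
 *
 *        DEFINE_EVENT(foo_template, foo_start,
 *                TP_PROTO(int value), TP_ARGS(value));
 *
 *        DEFINE_EVENT(foo_template, foo_done,
 *                TP_PROTO(int value), TP_ARGS(value));
 *
 * which generates the class code once and only a small per-event stub for
 * foo_start and foo_done, instead of duplicating everything per event as
 * TRACE_EVENT does.
 */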
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        DECLARE_EVENT_CLASS(name, \
                PARAMS(proto), \
                PARAMS(args), \
                PARAMS(tstruct), \
                PARAMS(assign), \
                PARAMS(print)); \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)             type    item;

#undef __field_ext
#define __field_ext(type, item, filter_type)    type    item;

#undef __array
#define __array(type, item, len)        type    item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len)        u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
        struct ftrace_raw_##name { \
                struct trace_entry      ent; \
                tstruct \
                char                    __data[0]; \
        };
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
        static struct ftrace_event_call event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
                assign, print, reg, unreg) \
        TRACE_EVENT(name, __cpparg(proto), __cpparg(args), \
                __cpparg(tstruct), __cpparg(assign), __cpparg(print)) \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *        u32                     <item1>;
 *        u32                     <item2>;
 *        [...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this holds
 * the offset of each array from the beginning of the event.
 * The size of the array is also encoded, in the upper 16 bits of <item>.
 */
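
/*
 * Continuing the hypothetical foo_bar example from stage 1 (illustrative
 * only), this stage would produce:
 *
 *        struct ftrace_data_offsets_foo_bar {
 *                u32                     name;
 *        };
 *
 * Only the __string() field needs a dynamic offset; plain __field() and
 * __array() entries generate nothing here.
 */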

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)        u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
        struct ftrace_data_offsets_##call { \
                tstruct; \
        };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the format shown for the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *        struct ftrace_raw_##call field;
 *        int ret;
 *
 *        ret = trace_seq_printf(s, #type " " #item ";"
 *                               " offset:%u; size:%u;\n",
 *                               offsetof(struct ftrace_raw_##call, item),
 *                               sizeof(field.type));
 *
 * }
 */
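
/*
 * For the hypothetical foo_bar event this would emit, roughly, the body of
 * its "format" file (common fields omitted, offsets depending on the
 * architecture and on struct trace_entry):
 *
 *        field:__data_loc char[] name;   offset:12;      size:4; signed:1;
 *        field:int value;        offset:16;      size:4; signed:1;
 *
 *        print fmt: "name=%s value=%d", __get_str(name), REC->value
 */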

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item) \
        ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
                               "offset:%u;\tsize:%u;\tsigned:%u;\n", \
                               (unsigned int)offsetof(typeof(field), item), \
                               (unsigned int)sizeof(field.item), \
                               (unsigned int)is_signed_type(type)); \
        if (!ret) \
                return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)    __field(type, item)

#undef __array
#define __array(type, item, len) \
        ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
                               "offset:%u;\tsize:%u;\tsigned:%u;\n", \
                               (unsigned int)offsetof(typeof(field), item), \
                               (unsigned int)sizeof(field.item), \
                               (unsigned int)is_signed_type(type)); \
        if (!ret) \
                return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
                               "offset:%u;\tsize:%u;\tsigned:%u;\n", \
                               (unsigned int)offsetof(typeof(field), \
                                        __data_loc_##item), \
                               (unsigned int)sizeof(field.__data_loc_##item), \
                               (unsigned int)is_signed_type(type)); \
        if (!ret) \
                return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int \
ftrace_format_setup_##call(struct ftrace_event_call *unused, \
                           struct trace_seq *s) \
{ \
        struct ftrace_raw_##call field __attribute__((unused)); \
        int ret = 0; \
\
        tstruct; \
\
        return ret; \
} \
\
static int \
ftrace_format_##call(struct ftrace_event_call *unused, \
                     struct trace_seq *s) \
{ \
        int ret = 0; \
\
        ret = ftrace_format_setup_##call(unused, s); \
        if (!ret) \
                return ret; \
\
        ret = trace_seq_printf(s, "\nprint fmt: " print); \
\
        return ret; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
static int \
ftrace_format_##name(struct ftrace_event_call *unused, \
                     struct trace_seq *s) \
{ \
        int ret = 0; \
\
        ret = ftrace_format_setup_##template(unused, s); \
        if (!ret) \
                return ret; \
\
        trace_seq_printf(s, "\nprint fmt: " print); \
\
        return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *        struct trace_seq *s = &iter->seq;
 *        struct ftrace_raw_<call> *field;        <-- defined in stage 1
 *        struct trace_entry *entry;
 *        struct trace_seq *p;
 *        int ret;
 *
 *        entry = iter->ent;
 *
 *        if (entry->type != event_<call>.id) {
 *                WARN_ON_ONCE(1);
 *                return TRACE_TYPE_UNHANDLED;
 *        }
 *
 *        field = (typeof(field))entry;
 *
 *        p = &get_cpu_var(ftrace_event_seq);
 *        trace_seq_init(p);
 *        ret = trace_seq_printf(s, <TP_printk> "\n");
 *        put_cpu();
 *        if (!ret)
 *                return TRACE_TYPE_PARTIAL_LINE;
 *
 *        return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
        ({ \
                static const struct trace_print_flags __flags[] = \
                        { flag_array, { -1, NULL }}; \
                ftrace_print_flags_seq(p, delim, flag, __flags); \
        })

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
        ({ \
                static const struct trace_print_flags symbols[] = \
                        { symbol_array, { -1, NULL }}; \
                ftrace_print_symbols_seq(p, value, symbols); \
        })
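
/*
 * A usage sketch (hypothetical flag and state values, not from a real
 * event): inside a TP_printk() these helpers turn numeric fields into
 * readable strings, e.g.
 *
 *        TP_printk("state=%s flags=%s",
 *                __print_symbolic(__entry->state,
 *                        { 0, "IDLE" }, { 1, "RUNNING" }, { 2, "BLOCKED" }),
 *                __print_flags(__entry->flags, "|",
 *                        { 0x01, "SYNC" }, { 0x02, "URGENT" }))
 *
 * Both helpers format into the per cpu trace_seq 'p' that the
 * ftrace_raw_output_id_<call>() function below sets up.
 */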

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static enum print_line_t \
ftrace_raw_output_id_##call(int event_id, const char *name, \
                            struct trace_iterator *iter, int flags) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##call *field; \
        struct trace_entry *entry; \
        struct trace_seq *p; \
        int ret; \
\
        entry = iter->ent; \
\
        if (entry->type != event_id) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
\
        field = (typeof(field))entry; \
\
        p = &get_cpu_var(ftrace_event_seq); \
        trace_seq_init(p); \
        ret = trace_seq_printf(s, "%s: ", name); \
        if (ret) \
                ret = trace_seq_printf(s, print); \
        put_cpu(); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
\
        return TRACE_TYPE_HANDLED; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
static enum print_line_t \
ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
{ \
        return ftrace_raw_output_id_##template(event_##name.id, \
                                               #name, iter, flags); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##template *field; \
        struct trace_entry *entry; \
        struct trace_seq *p; \
        int ret; \
\
        entry = iter->ent; \
\
        if (entry->type != event_##call.id) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
\
        field = (typeof(field))entry; \
\
        p = &get_cpu_var(ftrace_event_seq); \
        trace_seq_init(p); \
        ret = trace_seq_printf(s, "%s: ", #call); \
        if (ret) \
                ret = trace_seq_printf(s, print); \
        put_cpu(); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
\
        return TRACE_TYPE_HANDLED; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type) \
        ret = trace_define_field(event_call, #type, #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), \
                                 is_signed_type(type), filter_type); \
        if (ret) \
                return ret;

#undef __field
#define __field(type, item)     __field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len) \
        BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
        ret = trace_define_field(event_call, #type "[" #len "]", #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), 0, FILTER_OTHER); \
        if (ret) \
                return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
                                 offsetof(typeof(field), __data_loc_##item), \
                                 sizeof(field.__data_loc_##item), 0, \
                                 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
        struct ftrace_raw_##call field; \
        int ret; \
\
        ret = trace_define_common_fields(event_call); \
        if (ret) \
                return ret; \
\
        tstruct; \
\
        return ret; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __data_offsets->item = __data_size + \
                               offsetof(typeof(*entry), __data); \
        __data_offsets->item |= (len * sizeof(type)) << 16; \
        __data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline int ftrace_get_offsets_##call( \
        struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
        int __data_size = 0; \
        struct ftrace_raw_##call __maybe_unused *entry; \
\
        tstruct; \
\
        return __data_size; \
}
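
/*
 * For the hypothetical foo_bar event, the generated
 * ftrace_get_offsets_foo_bar() would in effect do, for its one string
 * field:
 *
 *        __data_offsets->name  = offsetof(struct ftrace_raw_foo_bar, __data);
 *        __data_offsets->name |= (strlen(name) + 1) << 16;
 *        __data_size = strlen(name) + 1;
 *
 * i.e. the lower 16 bits hold the offset of the string from the start of
 * the entry and the upper 16 bits hold its length, matching the 0xffff
 * mask that __get_dynamic_array() applies in stage 3.
 */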

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *        return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *        unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
\
static void ftrace_profile_##name(proto); \
\
static int ftrace_profile_enable_##name(struct ftrace_event_call *unused) \
{ \
        return register_trace_##name(ftrace_profile_##name); \
} \
\
static void ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
{ \
        unregister_trace_##name(ftrace_profile_##name); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *        event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        int ret;
 *
 *        ret = register_trace_<call>(ftrace_event_<call>);
 *        if (ret)
 *                pr_info("event trace: Could not activate trace point "
 *                        "probe to <call>");
 *        return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *        struct ring_buffer_event *event;
 *        struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *        struct ring_buffer *buffer;
 *        unsigned long irq_flags;
 *        int pc;
 *
 *        local_save_flags(irq_flags);
 *        pc = preempt_count();
 *
 *        event = trace_current_buffer_lock_reserve(&buffer,
 *                                  event_<call>.id,
 *                                  sizeof(struct ftrace_raw_<call>),
 *                                  irq_flags, pc);
 *        if (!event)
 *                return;
 *        entry = ring_buffer_event_data(event);
 *
 *        <assign>;  <-- Here we assign the entries by the __field and
 *                       __array macros.
 *
 *        trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        int ret;
 *
 *        ret = register_trace_<call>(ftrace_raw_event_<call>);
 *        if (ret)
 *                pr_info("event trace: Could not activate trace point "
 *                        "probe to <call>");
 *        return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *        .trace                  = ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *        .name                   = "<call>",
 *        .system                 = "<system>",
 *        .raw_init               = trace_event_raw_init,
 *        .regfunc                = ftrace_reg_event_<call>,
 *        .unregfunc              = ftrace_unreg_event_<call>,
 *        .show_format            = ftrace_format_<call>,
 * }
 *
 */
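
/*
 * Continuing the illustrative foo_bar example, the DEFINE_EVENT() half of
 * its TRACE_EVENT() therefore ends up generating, in sketch form:
 *
 *        static void ftrace_raw_event_foo_bar(const char *name, int value)
 *        {
 *                ftrace_raw_event_id_foo_bar(&event_foo_bar, name, value);
 *        }
 *
 *        static struct ftrace_event_call __used
 *        __attribute__((section("_ftrace_events"))) event_foo_bar = {
 *                .name           = "foo_bar",
 *                ...
 *        };
 *
 * with the register/unregister, format and define_fields callbacks from
 * the earlier stages wired into the structure.
 */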

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call) \
        .profile_count = ATOMIC_INIT(-1), \
        .profile_enable = ftrace_profile_enable_##call, \
        .profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1) \

#undef __assign_str
#define __assign_str(dst, src) \
        strcpy(__get_str(dst), src);

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
\
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
                                       proto) \
{ \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        struct ring_buffer_event *event; \
        struct ftrace_raw_##call *entry; \
        struct ring_buffer *buffer; \
        unsigned long irq_flags; \
        int __data_size; \
        int pc; \
\
        local_save_flags(irq_flags); \
        pc = preempt_count(); \
\
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
        event = trace_current_buffer_lock_reserve(&buffer, \
                                 event_call->id, \
                                 sizeof(*entry) + __data_size, \
                                 irq_flags, pc); \
        if (!event) \
                return; \
        entry = ring_buffer_event_data(event); \
\
\
        tstruct \
\
        { assign; } \
\
        if (!filter_current_check_discard(buffer, event_call, entry, event)) \
                trace_nowake_buffer_unlock_commit(buffer, \
                                                  event, irq_flags, pc); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static void ftrace_raw_event_##call(proto) \
{ \
        ftrace_raw_event_id_##template(&event_##call, args); \
} \
\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
{ \
        int ret; \
\
        ret = register_trace_##call(ftrace_raw_event_##call); \
        if (ret) \
                pr_info("event trace: Could not activate trace point " \
                        "probe to %s\n", #call); \
        return ret; \
} \
\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
{ \
        unregister_trace_##call(ftrace_raw_event_##call); \
} \
\
static struct trace_event ftrace_event_type_##call = { \
        .trace                  = ftrace_raw_output_##call, \
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name                   = #call, \
        .system                 = __stringify(TRACE_SYSTEM), \
        .event                  = &ftrace_event_type_##call, \
        .raw_init               = trace_event_raw_init, \
        .regfunc                = ftrace_raw_reg_event_##call, \
        .unregfunc              = ftrace_raw_unreg_event_##call, \
        .show_format            = ftrace_format_##template, \
        .define_fields          = ftrace_define_fields_##template, \
        _TRACE_PROFILE_INIT(call) \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name                   = #call, \
        .system                 = __stringify(TRACE_SYSTEM), \
        .event                  = &ftrace_event_type_##call, \
        .raw_init               = trace_event_raw_init, \
        .regfunc                = ftrace_raw_reg_event_##call, \
        .unregfunc              = ftrace_raw_unreg_event_##call, \
        .show_format            = ftrace_format_##call, \
        .define_fields          = ftrace_define_fields_##template, \
        _TRACE_PROFILE_INIT(call) \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert into the ring buffer but into a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *        struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *        struct ftrace_event_call *event_call = &event_<call>;
 *        extern void perf_tp_event(int, u64, u64, void *, int);
 *        struct ftrace_raw_<call> *entry;
 *        struct perf_trace_buf *trace_buf;
 *        u64 __addr = 0, __count = 1;
 *        unsigned long irq_flags;
 *        struct trace_entry *ent;
 *        int __entry_size;
 *        int __data_size;
 *        int __cpu;
 *        int pc;
 *
 *        pc = preempt_count();
 *
 *        __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *        // Below we want to get the aligned size by taking into account
 *        // the u32 field that will later store the buffer size
 *        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *                             sizeof(u64));
 *        __entry_size -= sizeof(u32);
 *
 *        // Protect the non-NMI buffer
 *        // This also protects the rcu read side
 *        local_irq_save(irq_flags);
 *        __cpu = smp_processor_id();
 *
 *        if (in_nmi())
 *                trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *        else
 *                trace_buf = rcu_dereference(perf_trace_buf);
 *
 *        if (!trace_buf)
 *                goto end;
 *
 *        trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *        // Avoid recursion from perf that could mess up the buffer
 *        if (trace_buf->recursion++)
 *                goto end_recursion;
 *
 *        raw_data = trace_buf->buf;
 *
 *        // Make recursion update visible before entering perf_tp_event
 *        // so that we protect from perf recursions.
 *
 *        barrier();
 *
 *        // zero dead bytes from alignment to avoid stack leak to userspace:
 *        *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *        entry = (struct ftrace_raw_<call> *)raw_data;
 *        ent = &entry->ent;
 *        tracing_generic_entry_update(ent, irq_flags, pc);
 *        ent->type = event_call->id;
 *
 *        <tstruct>      <- do the work for the dynamic arrays
 *
 *        <assign>       <- assign our values
 *
 *        perf_tp_event(event_call->id, __addr, __count, entry,
 *                      __entry_size);  <- submit them to the perf counter
 *
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static void \
ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
                            proto) \
{ \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        extern int perf_swevent_get_recursion_context(void); \
        extern void perf_swevent_put_recursion_context(int rctx); \
        extern void perf_tp_event(int, u64, u64, void *, int); \
        struct ftrace_raw_##call *entry; \
        u64 __addr = 0, __count = 1; \
        unsigned long irq_flags; \
        struct trace_entry *ent; \
        int __entry_size; \
        int __data_size; \
        char *trace_buf; \
        char *raw_data; \
        int __cpu; \
        int rctx; \
        int pc; \
\
        pc = preempt_count(); \
\
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
                             sizeof(u64)); \
        __entry_size -= sizeof(u32); \
\
        if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
                      "profile buffer not large enough")) \
                return; \
\
        local_irq_save(irq_flags); \
\
        rctx = perf_swevent_get_recursion_context(); \
        if (rctx < 0) \
                goto end_recursion; \
\
        __cpu = smp_processor_id(); \
\
        if (in_nmi()) \
                trace_buf = rcu_dereference(perf_trace_buf_nmi); \
        else \
                trace_buf = rcu_dereference(perf_trace_buf); \
\
        if (!trace_buf) \
                goto end; \
\
        raw_data = per_cpu_ptr(trace_buf, __cpu); \
\
        *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
        entry = (struct ftrace_raw_##call *)raw_data; \
        ent = &entry->ent; \
        tracing_generic_entry_update(ent, irq_flags, pc); \
        ent->type = event_call->id; \
\
        tstruct \
\
        { assign; } \
\
        perf_tp_event(event_call->id, __addr, __count, entry, \
                      __entry_size); \
\
end: \
        perf_swevent_put_recursion_context(rctx); \
end_recursion: \
        local_irq_restore(irq_flags); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static void ftrace_profile_##call(proto) \
{ \
        struct ftrace_event_call *event_call = &event_##call; \
\
        ftrace_profile_templ_##template(event_call, args); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT
