/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
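
/*
 * For illustration only (the event below is made up, not part of the
 * kernel), a hypothetical definition such as:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int req, const char *name),
 *		TP_ARGS(req, name),
 *		TP_STRUCT__entry(
 *			__field(int, req)
 *			__string(name, name)
 *		),
 *		TP_fast_assign(
 *			__entry->req = req;
 *			__assign_str(name, name);
 *		),
 *		TP_printk("req=%d name=%s", __entry->req, __get_str(name))
 *	);
 *
 * would be expanded by this stage into roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			req;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 */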

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32	<item1>;
 *	u32	<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>, which records
 * the offset of that array from the beginning of the event.
 * The size of the array is also encoded, in the upper 16 bits of <item>.
 */
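
/*
 * Continuing the hypothetical foo_bar example from above (names are
 * made up), this stage would produce:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * i.e. one u32 per __string()/__dynamic_array() field, and nothing for
 * plain __field()/__array() fields, whose definitions are emptied below.
 */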

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the format used to display the trace point.
 *
 * int
 * ftrace_format_##call(struct ftrace_event_call *unused,
 *			struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 *
 * }
 */
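
/*
 * For the hypothetical foo_bar event sketched above, the generated
 * ftrace_format_foo_bar() would emit text of roughly this shape into
 * the event's "format" file (offsets and sizes are illustrative and
 * architecture dependent; common fields are omitted here):
 *
 *	field:int req;	offset:12;	size:4;
 *	field:__data_loc char[] name;	offset:16;	size:4;
 *
 *	print fmt: "req=%d name=%s", REC->req, __get_str(name)
 */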

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\n",		       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
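
/*
 * The __data_loc_<field> word packs two values: the offset of the dynamic
 * data from the start of the entry in its lower 16 bits, and the length of
 * that data in its upper 16 bits (see ftrace_get_offsets_<call> further
 * down). As a worked example with made-up numbers: a string stored 24 bytes
 * into the entry with a length of 9 bytes is recorded as
 * (9 << 16) | 24 == 0x00090018, and __get_dynamic_array() masks with 0xffff
 * to recover the offset 24.
 */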

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
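
/*
 * Both helpers are meant to be used inside TP_printk(). A purely
 * illustrative usage (field and flag names are made up):
 *
 *	TP_printk("state=%s flags=%s",
 *		__print_symbolic(__entry->state,
 *			{ 0, "RUNNING" }, { 1, "SLEEPING" }),
 *		__print_flags(__entry->flags, "|",
 *			{ 0x1, "URGENT" }, { 0x2, "RETRY" }))
 *
 * __print_symbolic() prints the name matching an exact value, while
 * __print_flags() prints every name whose bit is set, joined by the
 * given delimiter.
 */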
244
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400245#undef TRACE_EVENT
246#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
247enum print_line_t \
248ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
249{ \
250 struct trace_seq *s = &iter->seq; \
251 struct ftrace_raw_##call *field; \
252 struct trace_entry *entry; \
Steven Rostedtbe74b732009-05-26 20:25:22 +0200253 struct trace_seq *p; \
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400254 int ret; \
255 \
256 entry = iter->ent; \
257 \
258 if (entry->type != event_##call.id) { \
259 WARN_ON_ONCE(1); \
260 return TRACE_TYPE_UNHANDLED; \
261 } \
262 \
263 field = (typeof(field))entry; \
264 \
Steven Rostedtbe74b732009-05-26 20:25:22 +0200265 p = &get_cpu_var(ftrace_event_seq); \
Steven Whitehouse56d8bd32009-06-03 14:52:03 +0100266 trace_seq_init(p); \
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400267 ret = trace_seq_printf(s, #call ": " print); \
Steven Rostedtbe74b732009-05-26 20:25:22 +0200268 put_cpu(); \
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400269 if (!ret) \
270 return TRACE_TYPE_PARTIAL_LINE; \
271 \
272 return TRACE_TYPE_HANDLED; \
273}
274
275#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
276
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
int									\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each dynamic array from the beginning of the
 * event, along with its size (packed into the upper 16 bits).
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}
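
/*
 * For the hypothetical foo_bar event (illustrative only), the macro above
 * would expand to roughly:
 *
 *	static inline int ftrace_get_offsets_foo_bar(
 *		struct ftrace_data_offsets_foo_bar *__data_offsets,
 *		int req, const char *name)
 *	{
 *		int __data_size = 0;
 *		struct ftrace_raw_foo_bar __maybe_unused *entry;
 *
 *		__data_offsets->name = __data_size +
 *				       offsetof(typeof(*entry), __data);
 *		__data_offsets->name |=
 *			((strlen(name) + 1) * sizeof(char)) << 16;
 *		__data_size += (strlen(name) + 1) * sizeof(char);
 *
 *		return __data_size;
 *	}
 */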

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 *	int ret = 0;
 *
 *	if (!atomic_inc_return(&event_call->profile_count))
 *		ret = register_trace_<call>(ftrace_profile_<call>);
 *
 *	return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 *	if (atomic_add_negative(-1, &event_call->profile_count))
 *		unregister_trace_<call>(ftrace_profile_<call>);
 * }
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *		       __array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_raw_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_raw_reg_event_<call>,
 *	.unregfunc		= ftrace_raw_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */
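
/*
 * To tie the stages together (illustrative, using the hypothetical
 * foo_bar event): once ftrace_raw_reg_event_foo_bar() has registered the
 * probe, a call site in kernel code such as
 *
 *	trace_foo_bar(req, name);
 *
 * ends up invoking ftrace_raw_event_foo_bar(req, name), which reserves
 * space in the ring buffer, runs the TP_fast_assign() block and commits
 * the event.
 */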

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(event_call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void *ptr)			\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void *ptr)			\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	init_preds(&event_##call);					\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,		\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events.
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert into the ring buffer but into a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	int __entry_size;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	do {
 *		char raw_data[__entry_size]; <- allocate our sample on the stack
 *		struct trace_entry *ent;
 *
 *		zero dead bytes from alignment to avoid stack leak to userspace:
 *
 *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *		entry = (struct ftrace_raw_<call> *)raw_data;
 *		ent = &entry->ent;
 *		tracing_generic_entry_update(ent, irq_flags, pc);
 *		ent->type = event_call->id;
 *
 *		<tstruct> <- set up the dynamic array fields
 *
 *		<assign>  <- assign our values
 *
 *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
 *			     __entry_size);  <- submit them to the perf counter
 *	} while (0);
 *
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	int __entry_size;						\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	do {								\
		char raw_data[__entry_size];				\
		struct trace_entry *ent;				\
									\
		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
		entry = (struct ftrace_raw_##call *)raw_data;		\
		ent = &entry->ent;					\
		tracing_generic_entry_update(ent, irq_flags, pc);	\
		ent->type = event_call->id;				\
									\
		tstruct							\
									\
		{ assign; }						\
									\
		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
			     __entry_size);				\
	} while (0);							\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT
