blob: 2969f65d8002fc8ffd42f3518dfbf3dfe4aaccd0 [file] [log] [blame]
Steven Rostedtf42c85e2009-04-13 12:25:37 -04001/*
2 * Stage 1 of the trace events.
3 *
4 * Override the macros in <trace/trace_events.h> to include the following:
5 *
6 * struct ftrace_raw_<call> {
7 * struct trace_entry ent;
8 * <type> <item>;
9 * <type2> <item2>[<len>];
10 * [...]
11 * };
12 *
13 * The <type> <item> is created by the __field(type, item) macro or
14 * the __array(type2, item2, len) macro.
15 * We simply do "type item;", and that will create the fields
16 * in the structure.
17 */
18
19#include <linux/ftrace_event.h>
20
/*
 * TRACE_EVENT_TEMPLATE can be used to add generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the TRACE_EVENT_TEMPLATE to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	TRACE_EVENT_TEMPLATE(name,			       \
			     PARAMS(proto),		       \
			     PARAMS(args),		       \
			     PARAMS(tstruct),		       \
			     PARAMS(assign),		       \
			     PARAMS(print));		       \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
39
40
/*
 * Stage 1 field macros: each expands to one member declaration
 * inside struct ftrace_raw_<call>.
 */
#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

/*
 * A dynamic array is represented by a single u32 holding its location:
 * offset of the data in the low 16 bits, length in the high 16 bits
 * (see the stage that computes __data_offsets, and the 0xffff mask in
 * __get_dynamic_array).
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

/* A string is just a dynamic char array; -1 length is a placeholder here. */
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
58
/* Stage 1: emit struct ftrace_raw_<name> for each template. */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0]; /* dynamic data follows */ \
	};
/* Each event only needs its ftrace_event_call forward declaration here. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call event_##name
69
/* Pass-through to re-wrap multi-token macro arguments in parentheses. */
#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace: reg/unreg are simply dropped. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print)) \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
81
Frederic Weisbecker9cbf1172009-04-19 04:51:29 +020082
/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>, this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

/* Fixed-size fields contribute nothing to the offsets struct. */
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/* One u32 slot per dynamic array, named after the field. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

/* Events share the template's offsets struct; nothing extra to emit. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
124
/*
 * Setup the showing format of trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

/* Print one "field:<type> <name>; offset; size; signed;" line per field. */
#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

/* The filter type does not affect the textual format. */
#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

/* Dynamic arrays describe the u32 location word, not the data itself. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item),  \
			       (unsigned int)is_signed_type(type));	       \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* In the "print fmt:" line, entry references become the literal REC. */
#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		      struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));		\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
218
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/* Low 16 bits of the location word are the offset from the event start. */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
static enum print_line_t						\
ftrace_raw_output_id_##call(int event_id, const char *name,		\
			    struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_id) {					\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

/* Per-event wrapper binding the event id and name to the template printer. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
static enum print_line_t						\
ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
{									\
	return ftrace_raw_output_id_##template(event_##name.id,	\
					       #name, iter, flags);	\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
329
Li Zefan43b51ea2009-08-07 10:33:22 +0800330#undef __field_ext
331#define __field_ext(type, item, filter_type) \
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400332 ret = trace_define_field(event_call, #type, #item, \
333 offsetof(typeof(field), item), \
Li Zefan43b51ea2009-08-07 10:33:22 +0800334 sizeof(field.item), \
335 is_signed_type(type), filter_type); \
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400336 if (ret) \
337 return ret;
338
Li Zefan43b51ea2009-08-07 10:33:22 +0800339#undef __field
340#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
341
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400342#undef __array
343#define __array(type, item, len) \
344 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
345 ret = trace_define_field(event_call, #type "[" #len "]", #item, \
346 offsetof(typeof(field), item), \
Li Zefan43b51ea2009-08-07 10:33:22 +0800347 sizeof(field.item), 0, FILTER_OTHER); \
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400348 if (ret) \
349 return ret;
350
Li Zefan7fcb7c42009-06-01 15:35:46 +0800351#undef __dynamic_array
352#define __dynamic_array(type, item, len) \
Lai Jiangshan68fd60a2009-07-16 10:53:34 +0800353 ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
Li Zefan43b51ea2009-08-07 10:33:22 +0800354 offsetof(typeof(field), __data_loc_##item), \
355 sizeof(field.__data_loc_##item), 0, \
356 FILTER_OTHER);
Li Zefan7fcb7c42009-06-01 15:35:46 +0800357
Frederic Weisbecker9cbf1172009-04-19 04:51:29 +0200358#undef __string
Li Zefan7fcb7c42009-06-01 15:35:46 +0800359#define __string(item, src) __dynamic_array(char, item, -1)
Frederic Weisbecker9cbf1172009-04-19 04:51:29 +0200360
Steven Rostedtff038f52009-11-18 20:27:27 -0500361#undef TRACE_EVENT_TEMPLATE
362#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print) \
Steven Rostedtec827c72009-09-14 10:50:23 -0400363static int \
Li Zefan14be96c2009-08-19 15:53:52 +0800364ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400365{ \
366 struct ftrace_raw_##call field; \
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400367 int ret; \
368 \
Li Zefane647d6b2009-08-19 15:54:32 +0800369 ret = trace_define_common_fields(event_call); \
370 if (ret) \
371 return ret; \
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400372 \
373 tstruct; \
374 \
375 return ret; \
376}
377
Steven Rostedtff038f52009-11-18 20:27:27 -0500378#undef DEFINE_EVENT
379#define DEFINE_EVENT(template, name, proto, args)
380
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400381#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
382
/*
 * Remember the offset of each array from the beginning of the event:
 * compute, at trace time, the total dynamic-data size and fill in each
 * per-field location word (offset in low 16 bits, byte length in the
 * high 16 bits).
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

/* Strings get their real length (incl. NUL) here, replacing the -1. */
#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
426
#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 * 	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 * 	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

/* Templates themselves produce no profile hooks; only events do. */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
									\
static void ftrace_profile_##name(proto);				\
									\
static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
{									\
	return register_trace_##name(ftrace_profile_##name);		\
}									\
									\
static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##name(ftrace_profile_##name);			\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif
467
Li Zefan7fcb7c42009-06-01 15:35:46 +0800468/*
Frederic Weisbecker9cbf1172009-04-19 04:51:29 +0200469 * Stage 4 of the trace events.
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400470 *
471 * Override the macros in <trace/trace_events.h> to include the following:
472 *
473 * static void ftrace_event_<call>(proto)
474 * {
475 * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
476 * }
477 *
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -0400478 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400479 * {
480 * int ret;
481 *
482 * ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
484 * pr_info("event trace: Could not activate trace point "
485 * "probe to <call>");
486 * return ret;
487 * }
488 *
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -0400489 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400490 * {
491 * unregister_trace_<call>(ftrace_event_<call>);
492 * }
493 *
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400494 *
495 * For those macros defined with TRACE_EVENT:
496 *
497 * static struct ftrace_event_call event_<call>;
498 *
499 * static void ftrace_raw_event_<call>(proto)
500 * {
501 * struct ring_buffer_event *event;
502 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
Steven Rostedte77405a2009-09-02 14:17:06 -0400503 * struct ring_buffer *buffer;
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400504 * unsigned long irq_flags;
505 * int pc;
506 *
507 * local_save_flags(irq_flags);
508 * pc = preempt_count();
509 *
Steven Rostedte77405a2009-09-02 14:17:06 -0400510 * event = trace_current_buffer_lock_reserve(&buffer,
511 * event_<call>.id,
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400512 * sizeof(struct ftrace_raw_<call>),
513 * irq_flags, pc);
514 * if (!event)
515 * return;
516 * entry = ring_buffer_event_data(event);
517 *
518 * <assign>; <-- Here we assign the entries by the __field and
519 * __array macros.
520 *
Steven Rostedte77405a2009-09-02 14:17:06 -0400521 * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400522 * }
523 *
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -0400524 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400525 * {
526 * int ret;
527 *
528 * ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
530 * pr_info("event trace: Could not activate trace point "
531 * "probe to <call>");
532 * return ret;
533 * }
534 *
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -0400535 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400536 * {
537 * unregister_trace_<call>(ftrace_raw_event_<call>);
538 * }
539 *
540 * static struct trace_event ftrace_event_type_<call> = {
541 * .trace = ftrace_raw_output_<call>, <-- stage 2
542 * };
543 *
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -0400544 * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400545 * {
546 * int id;
547 *
548 * id = register_ftrace_event(&ftrace_event_type_<call>);
549 * if (!id)
550 * return -ENODEV;
551 * event_<call>.id = id;
552 * return 0;
553 * }
554 *
555 * static struct ftrace_event_call __used
556 * __attribute__((__aligned__(4)))
557 * __attribute__((section("_ftrace_events"))) event_<call> = {
558 * .name = "<call>",
559 * .system = "<system>",
560 * .raw_init = ftrace_raw_init_event_<call>,
561 * .regfunc = ftrace_reg_event_<call>,
562 * .unregfunc = ftrace_unreg_event_<call>,
563 * .show_format = ftrace_format_<call>,
564 * }
565 *
566 */
567
#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

/*
 * Hook the per-event profile enable/disable functions (generated in the
 * CONFIG_EVENT_PROFILE stage above) into the ftrace_event_call initializer.
 */
#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

/* Fixed-size fields are filled by the event's own <assign> block. */
#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

/* Store the location word computed by ftrace_get_offsets_<call>(). */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

/*
 * Fix: dropped the stray trailing '\' line continuation that used to
 * follow this definition; it silently swallowed the next source line
 * into the macro body.
 */
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
601
/*
 * Stage 4: the actual trace-time insertion path. The template function
 * reserves ring-buffer space sized for the fixed struct plus dynamic
 * data, fills in the location words (tstruct) and the fields (assign),
 * then commits unless the event filter discards it.
 */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
									\
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
				       proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	ftrace_raw_event_id_##template(&event_##call, args);		\
}									\
									\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)	/* zero id means registration failed */		\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##template,		\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
694
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +0200695/*
696 * Define the insertion callback to profile events
697 *
698 * The job is very similar to ftrace_raw_event_<call> except that we don't
699 * insert in the ring buffer but in a perf counter.
700 *
701 * static void ftrace_profile_<call>(proto)
702 * {
703 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
704 * struct ftrace_event_call *event_call = &event_<call>;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200705 * extern void perf_tp_event(int, u64, u64, void *, int);
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +0200706 * struct ftrace_raw_##call *entry;
Frederic Weisbecker444a2a32009-11-06 04:13:05 +0100707 * struct perf_trace_buf *trace_buf;
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +0200708 * u64 __addr = 0, __count = 1;
709 * unsigned long irq_flags;
Frederic Weisbecker20ab44252009-09-18 06:10:28 +0200710 * struct trace_entry *ent;
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +0200711 * int __entry_size;
712 * int __data_size;
Frederic Weisbecker20ab44252009-09-18 06:10:28 +0200713 * int __cpu
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +0200714 * int pc;
715 *
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +0200716 * pc = preempt_count();
717 *
718 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
Frederic Weisbecker304703a2009-08-10 16:11:32 +0200719 *
720 * // Below we want to get the aligned size by taking into account
721 * // the u32 field that will later store the buffer size
722 * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
723 * sizeof(u64));
724 * __entry_size -= sizeof(u32);
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +0200725 *
 * // Protect the non nmi buffer
 * // This also protects the rcu read side
 * local_irq_save(irq_flags);
 *
 * // Avoid recursion from perf that could mess up the buffer
 * rctx = perf_swevent_get_recursion_context();
 * if (rctx < 0)
 *	goto end_recursion;
 *
 * __cpu = smp_processor_id();
 *
 * if (in_nmi())
 *	trace_buf = rcu_dereference(perf_trace_buf_nmi);
 * else
 *	trace_buf = rcu_dereference(perf_trace_buf);
 *
 * if (!trace_buf)
 *	goto end;
 *
 * raw_data = per_cpu_ptr(trace_buf, __cpu);
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +0200751 *
Frederic Weisbecker20ab44252009-09-18 06:10:28 +0200752 * //zero dead bytes from alignment to avoid stack leak to userspace:
753 * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
754 * entry = (struct ftrace_raw_<call> *)raw_data;
755 * ent = &entry->ent;
756 * tracing_generic_entry_update(ent, irq_flags, pc);
757 * ent->type = event_call->id;
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +0200758 *
Frederic Weisbecker20ab44252009-09-18 06:10:28 +0200759 * <tstruct> <- do some jobs with dynamic arrays
760 *
761 * <assign> <- affect our values
762 *
 * perf_tp_event(event_call->id, __addr, __count, entry,
 *		 __entry_size); <- submit them to perf counter
 *
 * end:
 * perf_swevent_put_recursion_context(rctx);
 * end_recursion:
 * local_irq_restore(irq_flags);
 *
 * }
767 */
768
769#ifdef CONFIG_EVENT_PROFILE
770
/*
 * __perf_addr() and __perf_count() may be used inside an event's
 * assignment block to override the address and count values handed
 * to perf_tp_event(); they default to 0 and 1 in the probe below.
 */
#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)
776
/*
 * Generate the shared profile/perf probe for a template:
 *
 *	static void ftrace_profile_templ_<call>(event_call, proto);
 *
 * It sizes the event record, snapshots it into a per-cpu software
 * buffer (perf_trace_buf / perf_trace_buf_nmi) and submits it via
 * perf_tp_event().  Every DEFINE_EVENT probe below forwards to one
 * of these.  See the pseudo-code walkthrough in the comment above.
 */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
static void								\
ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
			    proto)					\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	/* perf core entry points, declared locally rather than via a	\
	 * header */							\
	extern int perf_swevent_get_recursion_context(void);		\
	extern void perf_swevent_put_recursion_context(int rctx);	\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *trace_buf;						\
	char *raw_data;							\
	int __cpu;							\
	int rctx;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	/* round up to u64, accounting for the u32 field that will	\
	 * later store the buffer size */				\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	/* drop the event if it cannot fit in the profile buffer */	\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	/* protect the per-cpu buffer and the rcu read side */		\
	local_irq_save(irq_flags);					\
									\
	/* avoid recursion from perf that could mess up the buffer */	\
	rctx = perf_swevent_get_recursion_context();			\
	if (rctx < 0)							\
		goto end_recursion;					\
									\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
	else								\
		trace_buf = rcu_dereference(perf_trace_buf);		\
									\
	if (!trace_buf)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
									\
	/* zero dead bytes from alignment to avoid stack leak to	\
	 * userspace */							\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;		\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	/* tstruct: fill the dynamic-array offset/size words */		\
	tstruct								\
									\
	/* assign: fill in the event's fields */			\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
			 __entry_size);					\
									\
end:									\
	perf_swevent_put_recursion_context(rctx);			\
end_recursion:								\
	local_irq_restore(irq_flags);					\
									\
}
847
/*
 * One thin probe per event: look up this event's ftrace_event_call
 * and forward it, with the tracepoint arguments, to the shared
 * template handler ftrace_profile_templ_<template>().
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)		\
static void ftrace_profile_##call(proto)		\
{							\
	struct ftrace_event_call *event_call = &event_##call; \
							\
	ftrace_profile_templ_##template(event_call, args); \
}
856
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +0200857#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
858#endif /* CONFIG_EVENT_PROFILE */
859
Steven Rostedtf42c85e2009-04-13 12:25:37 -0400860#undef _TRACE_PROFILE_INIT
861