#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>

/*
 * The order of these masks is important. Matching masks will be seen
 * first and the leftover flags will end up showing by themselves.
 *
 * For example, if GFP_KERNEL were listed before GFP_USER, a GFP_USER
 * allocation would show up as:
 *
 * GFP_KERNEL|GFP_HARDWALL
 *
 * Thus the masks with the most bits set go first.
 */
#define show_gfp_flags(flags)                                           \
        (flags) ? __print_flags(flags, "|",                             \
        {(unsigned long)GFP_HIGHUSER_MOVABLE,   "GFP_HIGHUSER_MOVABLE"}, \
        {(unsigned long)GFP_HIGHUSER,           "GFP_HIGHUSER"},        \
        {(unsigned long)GFP_USER,               "GFP_USER"},            \
        {(unsigned long)GFP_TEMPORARY,          "GFP_TEMPORARY"},       \
        {(unsigned long)GFP_KERNEL,             "GFP_KERNEL"},          \
        {(unsigned long)GFP_NOFS,               "GFP_NOFS"},            \
        {(unsigned long)GFP_ATOMIC,             "GFP_ATOMIC"},          \
        {(unsigned long)GFP_NOIO,               "GFP_NOIO"},            \
        {(unsigned long)__GFP_HIGH,             "GFP_HIGH"},            \
        {(unsigned long)__GFP_WAIT,             "GFP_WAIT"},            \
        {(unsigned long)__GFP_IO,               "GFP_IO"},              \
        {(unsigned long)__GFP_COLD,             "GFP_COLD"},            \
        {(unsigned long)__GFP_NOWARN,           "GFP_NOWARN"},          \
        {(unsigned long)__GFP_REPEAT,           "GFP_REPEAT"},          \
        {(unsigned long)__GFP_NOFAIL,           "GFP_NOFAIL"},          \
        {(unsigned long)__GFP_NORETRY,          "GFP_NORETRY"},         \
        {(unsigned long)__GFP_COMP,             "GFP_COMP"},            \
        {(unsigned long)__GFP_ZERO,             "GFP_ZERO"},            \
        {(unsigned long)__GFP_NOMEMALLOC,       "GFP_NOMEMALLOC"},      \
        {(unsigned long)__GFP_HARDWALL,         "GFP_HARDWALL"},        \
        {(unsigned long)__GFP_THISNODE,         "GFP_THISNODE"},        \
        {(unsigned long)__GFP_RECLAIMABLE,      "GFP_RECLAIMABLE"},     \
        {(unsigned long)__GFP_MOVABLE,          "GFP_MOVABLE"}          \
        ) : "GFP_NOWAIT"

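/*
 * An illustrative sketch (not authoritative output) of what
 * show_gfp_flags() renders for a few common flag combinations, given
 * the masks above:
 *
 *      show_gfp_flags(GFP_KERNEL)                 -> "GFP_KERNEL"
 *      show_gfp_flags(GFP_KERNEL | __GFP_ZERO)    -> "GFP_KERNEL|GFP_ZERO"
 *      show_gfp_flags(GFP_USER)                   -> "GFP_USER"
 *      show_gfp_flags(0)                          -> "GFP_NOWAIT"
 */
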
TRACE_EVENT(kmalloc,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        gfp_t,          gfp_flags       )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
                __entry->bytes_req      = bytes_req;
                __entry->bytes_alloc    = bytes_alloc;
                __entry->gfp_flags      = gfp_flags;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags))
);

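/*
 * TRACE_EVENT(kmalloc) expands to, among other things, a trace_kmalloc()
 * call with the TP_PROTO signature above. A rough sketch of how an
 * allocator hook site fires it (the real call sites live in mm/slab.c,
 * mm/slub.c and mm/slob.c; "s", "ret" and "size" are illustrative
 * names):
 *
 *      ret = slab_alloc(s, gfpflags, _RET_IP_);
 *      trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 *      return ret;
 */
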
TRACE_EVENT(kmem_cache_alloc,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        gfp_t,          gfp_flags       )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
                __entry->bytes_req      = bytes_req;
                __entry->bytes_alloc    = bytes_alloc;
                __entry->gfp_flags      = gfp_flags;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags))
);

TRACE_EVENT(kmalloc_node,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags,
                 int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        gfp_t,          gfp_flags       )
                __field(        int,            node            )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
                __entry->bytes_req      = bytes_req;
                __entry->bytes_alloc    = bytes_alloc;
                __entry->gfp_flags      = gfp_flags;
                __entry->node           = node;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags),
                __entry->node)
);

TRACE_EVENT(kmem_cache_alloc_node,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags,
                 int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        gfp_t,          gfp_flags       )
                __field(        int,            node            )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
                __entry->bytes_req      = bytes_req;
                __entry->bytes_alloc    = bytes_alloc;
                __entry->gfp_flags      = gfp_flags;
                __entry->node           = node;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags),
                __entry->node)
);

TRACE_EVENT(kfree,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
        ),

        TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

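/*
 * The free-side events carry only the call site and the pointer, so the
 * hook is a one-liner. A sketch of how kfree() might fire it (the real
 * hook sits at the top of kfree() in whichever slab allocator is built
 * in):
 *
 *      void kfree(const void *x)
 *      {
 *              trace_kfree(_RET_IP_, x);
 *              ...
 *      }
 */
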
TRACE_EVENT(kmem_cache_free,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
        ),

        TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

TRACE_EVENT(mm_page_free_direct,

        TP_PROTO(struct page *page, unsigned int order),

        TP_ARGS(page, order),

        TP_STRUCT__entry(
                __field(        struct page *,  page            )
                __field(        unsigned int,   order           )
        ),

        TP_fast_assign(
                __entry->page           = page;
                __entry->order          = order;
        ),

        TP_printk("page=%p pfn=%lu order=%d",
                        __entry->page,
                        page_to_pfn(__entry->page),
                        __entry->order)
);

TRACE_EVENT(mm_pagevec_free,

        TP_PROTO(struct page *page, int cold),

        TP_ARGS(page, cold),

        TP_STRUCT__entry(
                __field(        struct page *,  page            )
                __field(        int,            cold            )
        ),

        TP_fast_assign(
                __entry->page           = page;
                __entry->cold           = cold;
        ),

        TP_printk("page=%p pfn=%lu order=0 cold=%d",
                        __entry->page,
                        page_to_pfn(__entry->page),
                        __entry->cold)
);

TRACE_EVENT(mm_page_alloc,

        TP_PROTO(struct page *page, unsigned int order,
                        gfp_t gfp_flags, int migratetype),

        TP_ARGS(page, order, gfp_flags, migratetype),

        TP_STRUCT__entry(
                __field(        struct page *,  page            )
                __field(        unsigned int,   order           )
                __field(        gfp_t,          gfp_flags       )
                __field(        int,            migratetype     )
        ),

        TP_fast_assign(
                __entry->page           = page;
                __entry->order          = order;
                __entry->gfp_flags      = gfp_flags;
                __entry->migratetype    = migratetype;
        ),

        TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
                __entry->page,
                page_to_pfn(__entry->page),
                __entry->order,
                __entry->migratetype,
                show_gfp_flags(__entry->gfp_flags))
);

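/*
 * A sketch of the hook for this event: it fires once per call into the
 * page allocator's top-level path, roughly as placed in
 * __alloc_pages_nodemask() in mm/page_alloc.c:
 *
 *      trace_mm_page_alloc(page, order, gfp_mask, migratetype);
 */
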
TRACE_EVENT(mm_page_alloc_zone_locked,

        TP_PROTO(struct page *page, unsigned int order, int migratetype),

        TP_ARGS(page, order, migratetype),

        TP_STRUCT__entry(
                __field(        struct page *,  page            )
                __field(        unsigned int,   order           )
                __field(        int,            migratetype     )
        ),

        TP_fast_assign(
                __entry->page           = page;
                __entry->order          = order;
                __entry->migratetype    = migratetype;
        ),

        TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
                __entry->page,
                page_to_pfn(__entry->page),
                __entry->order,
                __entry->migratetype,
                __entry->order == 0)
);

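/*
 * Note that "percpu_refill" is not a stored field: order-0 pages taken
 * with the zone lock held are on their way to refill the per-cpu page
 * lists, so the TP_printk() above simply prints (order == 0) for it.
 */
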
TRACE_EVENT(mm_page_pcpu_drain,

        TP_PROTO(struct page *page, int order, int migratetype),

        TP_ARGS(page, order, migratetype),

        TP_STRUCT__entry(
                __field(        struct page *,  page            )
                __field(        int,            order           )
                __field(        int,            migratetype     )
        ),

        TP_fast_assign(
                __entry->page           = page;
                __entry->order          = order;
                __entry->migratetype    = migratetype;
        ),

        TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
                        __entry->page,
                        page_to_pfn(__entry->page),
                        __entry->order,
                        __entry->migratetype)
);

TRACE_EVENT(mm_page_alloc_extfrag,

        TP_PROTO(struct page *page,
                        int alloc_order, int fallback_order,
                        int alloc_migratetype, int fallback_migratetype),

        TP_ARGS(page,
                alloc_order, fallback_order,
                alloc_migratetype, fallback_migratetype),

        TP_STRUCT__entry(
                __field(        struct page *,  page                    )
                __field(        int,            alloc_order             )
                __field(        int,            fallback_order          )
                __field(        int,            alloc_migratetype       )
                __field(        int,            fallback_migratetype    )
        ),

        TP_fast_assign(
                __entry->page                   = page;
                __entry->alloc_order            = alloc_order;
                __entry->fallback_order         = fallback_order;
                __entry->alloc_migratetype      = alloc_migratetype;
                __entry->fallback_migratetype   = fallback_migratetype;
        ),

        TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
                __entry->page,
                page_to_pfn(__entry->page),
                __entry->alloc_order,
                __entry->fallback_order,
                pageblock_order,
                __entry->alloc_migratetype,
                __entry->fallback_migratetype,
                __entry->fallback_order < pageblock_order,
                __entry->alloc_migratetype == __entry->fallback_migratetype)
);

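/*
 * "fragmenting" and "change_ownership" are likewise computed at print
 * time rather than stored: a fallback order below pageblock_order means
 * the pageblock ends up split between migratetypes, while
 * change_ownership is derived by comparing the two recorded
 * migratetypes. A rough sketch of the call site in the allocator's
 * fallback path in mm/page_alloc.c:
 *
 *      trace_mm_page_alloc_extfrag(page, order, current_order,
 *                                  start_migratetype, migratetype);
 */
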
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>