#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>

/*
 * The order of these masks is important. Matching masks will be seen
 * first and the leftover flags will end up showing by themselves.
 *
 * For example, if GFP_KERNEL were listed before GFP_USER, a GFP_USER
 * allocation would be shown as:
 *
 * GFP_KERNEL|GFP_HARDWALL
 *
 * Thus the masks with the most bits set go first.
 */
#define show_gfp_flags(flags) \
	(flags) ? __print_flags(flags, "|", \
	{(unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"}, \
	{(unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"}, \
	{(unsigned long)GFP_USER,		"GFP_USER"}, \
	{(unsigned long)GFP_TEMPORARY,		"GFP_TEMPORARY"}, \
	{(unsigned long)GFP_KERNEL,		"GFP_KERNEL"}, \
	{(unsigned long)GFP_NOFS,		"GFP_NOFS"}, \
	{(unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"}, \
	{(unsigned long)GFP_NOIO,		"GFP_NOIO"}, \
	{(unsigned long)__GFP_HIGH,		"GFP_HIGH"}, \
	{(unsigned long)__GFP_WAIT,		"GFP_WAIT"}, \
	{(unsigned long)__GFP_IO,		"GFP_IO"}, \
	{(unsigned long)__GFP_COLD,		"GFP_COLD"}, \
	{(unsigned long)__GFP_NOWARN,		"GFP_NOWARN"}, \
	{(unsigned long)__GFP_REPEAT,		"GFP_REPEAT"}, \
	{(unsigned long)__GFP_NOFAIL,		"GFP_NOFAIL"}, \
	{(unsigned long)__GFP_NORETRY,		"GFP_NORETRY"}, \
	{(unsigned long)__GFP_COMP,		"GFP_COMP"}, \
	{(unsigned long)__GFP_ZERO,		"GFP_ZERO"}, \
	{(unsigned long)__GFP_NOMEMALLOC,	"GFP_NOMEMALLOC"}, \
	{(unsigned long)__GFP_HARDWALL,		"GFP_HARDWALL"}, \
	{(unsigned long)__GFP_THISNODE,		"GFP_THISNODE"}, \
	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"}, \
	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"} \
	) : "GFP_NOWAIT"

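/*
 * Illustrative examples of the resulting output (assuming GFP_KERNEL
 * is __GFP_WAIT | __GFP_IO | __GFP_FS, as it was when this file was
 * written): a gfp mask of (GFP_KERNEL | __GFP_ZERO) is printed as
 * "GFP_KERNEL|GFP_ZERO", while a mask of 0 is printed as "GFP_NOWAIT"
 * via the ternary above.
 */
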
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);

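/*
 * Each DEFINE_EVENT below stamps out a real tracepoint from the class
 * above. A minimal sketch of how the slab allocator fires one (the
 * exact call sites live under mm/; the argument names here are
 * illustrative, not the allocator's own):
 *
 *	trace_kmalloc(_RET_IP_, ret, size, cache_size, flags);
 */
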
DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);

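/*
 * The NUMA-aware events below carry the target node in addition to the
 * fields above; an illustrative caller, mirroring the trace_kmalloc()
 * sketch earlier:
 *
 *	trace_kmalloc_node(_RET_IP_, ret, size, cache_size, flags, node);
 */
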
DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

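/*
 * Free-side sketch (illustrative): kfree() fires its tracepoint with
 * just the caller and the object being released,
 *
 *	trace_kfree(_RET_IP_, objp);
 *
 * before handing the object back to the allocator.
 */
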
DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

DEFINE_EVENT(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

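/*
 * The page-level events below can be watched from user space; a
 * minimal sketch, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/kmem/mm_page_alloc/enable
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 */
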
TRACE_EVENT(mm_page_free_direct,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=%lu order=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order)
);

TRACE_EVENT(mm_pagevec_free,

	TP_PROTO(struct page *page, int cold),

	TP_ARGS(page, cold),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	int,		cold		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->cold		= cold;
	),

	TP_printk("page=%p pfn=%lu order=0 cold=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->cold)
);

TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
		 gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
		__entry->gfp_flags	= gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);

DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);

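/*
 * DEFINE_EVENT_PRINT reuses the mm_page class (same prototype,
 * arguments and record layout) but overrides TP_printk, so pcpu drains
 * are reported without the percpu_refill field.
 */
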
DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
		__entry->page, page_to_pfn(__entry->page),
		__entry->order, __entry->migratetype)
);

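/*
 * The trailing "fragmenting" and "change_ownership" values in the
 * printk below are computed from the logged fields rather than stored:
 * fragmenting is set when the fallback order is below pageblock_order,
 * and change_ownership when the two migratetypes compare equal.
 */
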
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		 int alloc_order, int fallback_order,
		 int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
	),

	TP_fast_assign(
		__entry->page			= page;
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->alloc_migratetype == __entry->fallback_migratetype)
);

#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>