blob: e76e8225706ff0ce764312e686f0c176e34c8dd5 [file] [log] [blame]
Li Zefand0b6e042009-07-13 10:33:21 +08001#undef TRACE_SYSTEM
2#define TRACE_SYSTEM kmem
3
Steven Rostedtea20d922009-04-10 08:54:16 -04004#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
Zhaolei02af61b2009-04-10 14:26:18 +08005#define _TRACE_KMEM_H
Eduard - Gabriel Munteanub9ce08c2008-08-10 20:14:03 +03006
7#include <linux/types.h>
Zhaoleifc182a42009-04-10 14:27:38 +08008#include <linux/tracepoint.h>
Mel Gorman33906bc2010-08-09 17:19:16 -07009#include "gfpflags.h"
Steven Rostedt62ba1802009-05-15 16:16:30 -040010
/*
 * Event class shared by the kmalloc and kmem_cache_alloc tracepoints.
 * Records the allocation call site, the returned object pointer, the
 * requested vs. actually allocated sizes, and the GFP flags used.
 */
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	/* show_gfp_flags() (from "gfpflags.h") renders the flags symbolically */
	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);
44
/* kmalloc() allocation event; record layout/format inherited from kmem_alloc. */
DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
52
/* kmem_cache_alloc() allocation event; layout/format inherited from kmem_alloc. */
DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
60
/*
 * Event class for the NUMA-aware allocator tracepoints (kmalloc_node,
 * kmem_cache_alloc_node): same fields as kmem_alloc plus the requested
 * NUMA node id.
 */
DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);
98
/* kmalloc_node() allocation event; inherited from kmem_alloc_node. */
DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
107
/* kmem_cache_alloc_node() allocation event; inherited from kmem_alloc_node. */
DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
116
/*
 * Event class shared by the kfree and kmem_cache_free tracepoints:
 * records only the freeing call site and the pointer being released.
 */
DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);
135
/* kfree() event; inherited from kmem_free. */
DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);
Steven Rostedtea20d922009-04-10 08:54:16 -0400142
/* kmem_cache_free() event; inherited from kmem_free. */
DEFINE_EVENT(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);
Mel Gorman4b4f2782009-09-21 17:02:41 -0700149
Konstantin Khlebnikovb413d482012-01-10 15:07:09 -0800150TRACE_EVENT(mm_page_free,
Mel Gorman4b4f2782009-09-21 17:02:41 -0700151
152 TP_PROTO(struct page *page, unsigned int order),
153
154 TP_ARGS(page, order),
155
156 TP_STRUCT__entry(
157 __field( struct page *, page )
158 __field( unsigned int, order )
159 ),
160
161 TP_fast_assign(
162 __entry->page = page;
163 __entry->order = order;
164 ),
165
166 TP_printk("page=%p pfn=%lu order=%d",
167 __entry->page,
168 page_to_pfn(__entry->page),
169 __entry->order)
170);
171
/*
 * Order-0 page freed via the batched (pagevec) free path.
 * "cold" flags whether the page was released as cache-cold.
 */
TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page, int cold),

	TP_ARGS(page, cold),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	int,		cold		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->cold		= cold;
	),

	/* batched frees are always order-0, hence the literal "order=0" */
	TP_printk("page=%p pfn=%lu order=0 cold=%d",
			__entry->page,
			page_to_pfn(__entry->page),
			__entry->cold)
);
193
194TRACE_EVENT(mm_page_alloc,
195
196 TP_PROTO(struct page *page, unsigned int order,
197 gfp_t gfp_flags, int migratetype),
198
199 TP_ARGS(page, order, gfp_flags, migratetype),
200
201 TP_STRUCT__entry(
202 __field( struct page *, page )
203 __field( unsigned int, order )
204 __field( gfp_t, gfp_flags )
205 __field( int, migratetype )
206 ),
207
208 TP_fast_assign(
209 __entry->page = page;
210 __entry->order = order;
211 __entry->gfp_flags = gfp_flags;
212 __entry->migratetype = migratetype;
213 ),
214
215 TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
216 __entry->page,
217 page_to_pfn(__entry->page),
218 __entry->order,
219 __entry->migratetype,
220 show_gfp_flags(__entry->gfp_flags))
221);
222
/*
 * Event class for per-cpu-list page allocator events
 * (mm_page_alloc_zone_locked, mm_page_pcpu_drain).
 */
DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	/* order-0 allocations are served from the per-cpu lists, hence
	 * percpu_refill is simply "order == 0" */
	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);
248
/* Page pulled from the buddy lists with the zone lock held; inherits mm_page. */
DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);
255
256DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
257
258 TP_PROTO(struct page *page, unsigned int order, int migratetype),
Mel Gorman0d3d0622009-09-21 17:02:44 -0700259
260 TP_ARGS(page, order, migratetype),
261
Mel Gorman0d3d0622009-09-21 17:02:44 -0700262 TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
Li Zefan53d04222009-11-26 15:04:10 +0800263 __entry->page, page_to_pfn(__entry->page),
264 __entry->order, __entry->migratetype)
Mel Gorman0d3d0622009-09-21 17:02:44 -0700265);
266
/*
 * An allocation fell back to a pageblock of a different migratetype,
 * a potential source of external fragmentation.  Derived printk fields:
 * fragmenting      - fallback split a pageblock (fallback_order < pageblock_order)
 * change_ownership - the whole pageblock keeps/changes migratetype
 *                    (alloc_migratetype == fallback_migratetype)
 */
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
			int alloc_order, int fallback_order,
			int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
	),

	TP_fast_assign(
		__entry->page			= page;
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
	),

	/* pageblock_order is a compile-time/boot-time constant, evaluated at
	 * output time rather than stored per event */
	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->alloc_migratetype == __entry->fallback_migratetype)
);
304
Liam Markcc2d4bd2013-01-16 10:14:40 -0800305
306DECLARE_EVENT_CLASS(ion_alloc,
307
308 TP_PROTO(const char *client_name,
309 const char *heap_name,
310 size_t len,
311 unsigned int mask,
312 unsigned int flags),
313
314 TP_ARGS(client_name, heap_name, len, mask, flags),
315
316 TP_STRUCT__entry(
317 __field(const char *, client_name)
318 __field(const char *, heap_name)
319 __field(size_t, len)
320 __field(unsigned int, mask)
321 __field(unsigned int, flags)
322 ),
323
324 TP_fast_assign(
325 __entry->client_name = client_name;
326 __entry->heap_name = heap_name;
327 __entry->len = len;
328 __entry->mask = mask;
329 __entry->flags = flags;
330 ),
331
332 TP_printk("client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x",
333 __entry->client_name,
334 __entry->heap_name,
335 __entry->len,
336 __entry->mask,
337 __entry->flags)
338);
339
/* Emitted on entry to ION buffer allocation; inherits ion_alloc. */
DEFINE_EVENT(ion_alloc, ion_alloc_buffer_start,

	TP_PROTO(const char *client_name,
		 const char *heap_name,
		 size_t len,
		 unsigned int mask,
		 unsigned int flags),

	TP_ARGS(client_name, heap_name, len, mask, flags)
);
350
/* Emitted when ION buffer allocation completes; inherits ion_alloc. */
DEFINE_EVENT(ion_alloc, ion_alloc_buffer_end,

	TP_PROTO(const char *client_name,
		 const char *heap_name,
		 size_t len,
		 unsigned int mask,
		 unsigned int flags),

	TP_ARGS(client_name, heap_name, len, mask, flags)
);
361
362DECLARE_EVENT_CLASS(ion_alloc_error,
363
364 TP_PROTO(const char *client_name,
365 const char *heap_name,
366 size_t len,
367 unsigned int mask,
368 unsigned int flags,
369 long error),
370
371 TP_ARGS(client_name, heap_name, len, mask, flags, error),
372
373 TP_STRUCT__entry(
374 __field(const char *, client_name)
375 __field(const char *, heap_name)
376 __field(size_t, len)
377 __field(unsigned int, mask)
378 __field(unsigned int, flags)
379 __field(long, error)
380 ),
381
382 TP_fast_assign(
383 __entry->client_name = client_name;
384 __entry->heap_name = heap_name;
385 __entry->len = len;
386 __entry->mask = mask;
387 __entry->flags = flags;
388 __entry->error = error;
389 ),
390
391 TP_printk(
392 "client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x error=%ld",
393 __entry->client_name,
394 __entry->heap_name,
395 __entry->len,
396 __entry->mask,
397 __entry->flags,
398 __entry->error)
399);
400
401
/* Allocation failed on the preferred heap and fell back to another;
 * inherits ion_alloc_error. */
DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fallback,

	TP_PROTO(const char *client_name,
		 const char *heap_name,
		 size_t len,
		 unsigned int mask,
		 unsigned int flags,
		 long error),

	TP_ARGS(client_name, heap_name, len, mask, flags, error)
);
413
/* Allocation failed outright; inherits ion_alloc_error. */
DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fail,

	TP_PROTO(const char *client_name,
		 const char *heap_name,
		 size_t len,
		 unsigned int mask,
		 unsigned int flags,
		 long error),

	TP_ARGS(client_name, heap_name, len, mask, flags, error)
);
425
426
/*
 * Event class for retry loops in allocation/migration paths:
 * records only the retry attempt count.
 */
DECLARE_EVENT_CLASS(alloc_retry,

	TP_PROTO(int tries),

	TP_ARGS(tries),

	TP_STRUCT__entry(
		__field(int, tries)
	),

	TP_fast_assign(
		__entry->tries = tries;
	),

	TP_printk("tries=%d",
		__entry->tries)
);
444
/* Retry of an ION content-protect heap allocation; inherits alloc_retry. */
DEFINE_EVENT(alloc_retry, ion_cp_alloc_retry,

	TP_PROTO(int tries),

	TP_ARGS(tries)
);
451
/* Retry of a page-migration attempt; inherits alloc_retry. */
DEFINE_EVENT(alloc_retry, migrate_retry,

	TP_PROTO(int tries),

	TP_ARGS(tries)
);
458
/* Retry of a CMA/contiguous DMA allocation; inherits alloc_retry. */
DEFINE_EVENT(alloc_retry, dma_alloc_contiguous_retry,

	TP_PROTO(int tries),

	TP_ARGS(tries)
);
465
/*
 * Event class bracketing migrate_pages() calls: records only the
 * migration mode argument.
 */
DECLARE_EVENT_CLASS(migrate_pages,

	TP_PROTO(int mode),

	TP_ARGS(mode),

	TP_STRUCT__entry(
		__field(int, mode)
	),

	TP_fast_assign(
		__entry->mode = mode;
	),

	TP_printk("mode=%d",
		__entry->mode)
);
483
/* Emitted on entry to page migration; inherits migrate_pages. */
DEFINE_EVENT(migrate_pages, migrate_pages_start,

	TP_PROTO(int mode),

	TP_ARGS(mode)
);
490
/* Emitted when page migration completes; inherits migrate_pages. */
DEFINE_EVENT(migrate_pages, migrate_pages_end,

	TP_PROTO(int mode),

	TP_ARGS(mode)
);
497
Adrian Alexei21f62bd2013-04-22 12:57:41 -0700498DECLARE_EVENT_CLASS(ion_alloc_pages,
499
500 TP_PROTO(gfp_t gfp_flags,
501 unsigned int order),
502
503 TP_ARGS(gfp_flags, order),
504
505 TP_STRUCT__entry(
506 __field(gfp_t, gfp_flags)
507 __field(unsigned int, order)
508 ),
509
510 TP_fast_assign(
511 __entry->gfp_flags = gfp_flags;
512 __entry->order = order;
513 ),
514
515 TP_printk("gfp_flags=%s order=%d",
516 show_gfp_flags(__entry->gfp_flags),
517 __entry->order)
518 );
519
/* IOMMU-heap page allocation attempt started; inherits ion_alloc_pages. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_start,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);
526
/* IOMMU-heap page allocation succeeded; inherits ion_alloc_pages. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_end,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);
533
/* IOMMU-heap page allocation failed; inherits ion_alloc_pages. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_fail,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);
540
/* System-heap page allocation attempt started; inherits ion_alloc_pages. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_start,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);
547
/* System-heap page allocation succeeded; inherits ion_alloc_pages. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_end,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);
554
/* System-heap page allocation failed; inherits ion_alloc_pages. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_fail,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);
561
Steven Rostedta8d154b2009-04-10 09:36:00 -0400562#endif /* _TRACE_KMEM_H */
Steven Rostedtea20d922009-04-10 08:54:16 -0400563
Steven Rostedta8d154b2009-04-10 09:36:00 -0400564/* This part must be outside protection */
565#include <trace/define_trace.h>