#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/gfpflags.h>
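
/*
 * Core slab allocation events. The kmalloc and kmem_cache_alloc
 * tracepoints below share this class: each records the caller's
 * address (call_site), the address of the object handed out, the size
 * the caller asked for (bytes_req), and the size the allocator
 * actually reserved (bytes_alloc, which may exceed bytes_req because
 * slab caches typically round sizes up), plus the GFP flags used for
 * the allocation.
 */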
DECLARE_EVENT_CLASS(kmem_alloc,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
                __field( size_t, bytes_req )
                __field( size_t, bytes_alloc )
                __field( gfp_t, gfp_flags )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
                __entry->bytes_req = bytes_req;
                __entry->bytes_alloc = bytes_alloc;
                __entry->gfp_flags = gfp_flags;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags))
);

DEFINE_EVENT(kmem_alloc, kmalloc,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
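
/*
 * NUMA-aware variants of the allocation events above: the same fields
 * plus the node the allocation was requested for.
 */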
DECLARE_EVENT_CLASS(kmem_alloc_node,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags,
                 int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
                __field( size_t, bytes_req )
                __field( size_t, bytes_alloc )
                __field( gfp_t, gfp_flags )
                __field( int, node )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
                __entry->bytes_req = bytes_req;
                __entry->bytes_alloc = bytes_alloc;
                __entry->gfp_flags = gfp_flags;
                __entry->node = node;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags),
                __entry->node)
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc,
                 gfp_t gfp_flags, int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc,
                 gfp_t gfp_flags, int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
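
/*
 * Slab free events: only the call site and the object address are
 * recorded. kfree and kmem_cache_free below share this class.
 */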
DECLARE_EVENT_CLASS(kmem_free,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
        ),

        TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

DEFINE_EVENT(kmem_free, kfree,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr)
);

DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr),

        /*
         * This trace can potentially be called from an offlined cpu.
         * Since tracepoints use RCU and RCU should not be used from
         * offline cpus, filter such calls out.
         * While this trace can be called from a preemptible section,
         * it has no impact on the condition since tasks can migrate
         * only from online cpus to other online cpus. Thus it's safe
         * to use raw_smp_processor_id().
         */
        TP_CONDITION(cpu_online(raw_smp_processor_id()))
);
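
/*
 * Page allocator events. These record the pfn rather than the struct
 * page pointer; the page pointer shown in the output is recomputed
 * from the pfn with pfn_to_page() at read time.
 */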
TRACE_EVENT_CONDITION(mm_page_free,

        TP_PROTO(struct page *page, unsigned int order),

        TP_ARGS(page, order),

        /*
         * This trace can potentially be called from an offlined cpu.
         * Since tracepoints use RCU and RCU should not be used from
         * offline cpus, filter such calls out.
         * While this trace can be called from a preemptible section,
         * it has no impact on the condition since tasks can migrate
         * only from online cpus to other online cpus. Thus it's safe
         * to use raw_smp_processor_id().
         */
        TP_CONDITION(cpu_online(raw_smp_processor_id())),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( unsigned int, order )
        ),

        TP_fast_assign(
                __entry->pfn = page_to_pfn(page);
                __entry->order = order;
        ),

        TP_printk("page=%p pfn=%lu order=%d",
                pfn_to_page(__entry->pfn),
                __entry->pfn,
                __entry->order)
);
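
/*
 * Batched free of order-0 pages (the order in the output is
 * hard-coded to 0); the cold flag hints that the page should go to
 * the cold end of the per-cpu free list.
 */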
TRACE_EVENT(mm_page_free_batched,

        TP_PROTO(struct page *page, int cold),

        TP_ARGS(page, cold),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( int, cold )
        ),

        TP_fast_assign(
                __entry->pfn = page_to_pfn(page);
                __entry->cold = cold;
        ),

        TP_printk("page=%p pfn=%lu order=0 cold=%d",
                pfn_to_page(__entry->pfn),
                __entry->pfn,
                __entry->cold)
);
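
/*
 * Allocation from the buddy allocator. A failed allocation
 * (page == NULL) is recorded with pfn == -1UL and printed as
 * page=NULL pfn=0.
 */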
TRACE_EVENT(mm_page_alloc,

        TP_PROTO(struct page *page, unsigned int order,
                 gfp_t gfp_flags, int migratetype),

        TP_ARGS(page, order, gfp_flags, migratetype),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( unsigned int, order )
                __field( gfp_t, gfp_flags )
                __field( int, migratetype )
        ),

        TP_fast_assign(
                __entry->pfn = page ? page_to_pfn(page) : -1UL;
                __entry->order = order;
                __entry->gfp_flags = gfp_flags;
                __entry->migratetype = migratetype;
        ),

        TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
                __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
                __entry->pfn != -1UL ? __entry->pfn : 0,
                __entry->order,
                __entry->migratetype,
                show_gfp_flags(__entry->gfp_flags))
);
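
/*
 * Shared class for pages taken straight from the buddy lists, used by
 * mm_page_alloc_zone_locked below (allocation with zone->lock held,
 * as the name suggests). percpu_refill in the output is derived from
 * the order: only order-0 allocations refill the per-cpu lists.
 */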
DECLARE_EVENT_CLASS(mm_page,

        TP_PROTO(struct page *page, unsigned int order, int migratetype),

        TP_ARGS(page, order, migratetype),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( unsigned int, order )
                __field( int, migratetype )
        ),

        TP_fast_assign(
                __entry->pfn = page ? page_to_pfn(page) : -1UL;
                __entry->order = order;
                __entry->migratetype = migratetype;
        ),

        TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
                __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
                __entry->pfn != -1UL ? __entry->pfn : 0,
                __entry->order,
                __entry->migratetype,
                __entry->order == 0)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

        TP_PROTO(struct page *page, unsigned int order, int migratetype),

        TP_ARGS(page, order, migratetype)
);
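
/*
 * Fired as pages are drained from a per-cpu page list back to the
 * buddy allocator.
 */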
TRACE_EVENT_CONDITION(mm_page_pcpu_drain,

        TP_PROTO(struct page *page, unsigned int order, int migratetype),

        TP_ARGS(page, order, migratetype),

        /*
         * This trace can potentially be called from an offlined cpu.
         * Since tracepoints use RCU and RCU should not be used from
         * offline cpus, filter such calls out.
         * While this trace can be called from a preemptible section,
         * it has no impact on the condition since tasks can migrate
         * only from online cpus to other online cpus. Thus it's safe
         * to use raw_smp_processor_id().
         */
        TP_CONDITION(cpu_online(raw_smp_processor_id())),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( unsigned int, order )
                __field( int, migratetype )
        ),

        TP_fast_assign(
                __entry->pfn = page ? page_to_pfn(page) : -1UL;
                __entry->order = order;
                __entry->migratetype = migratetype;
        ),

        TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
                pfn_to_page(__entry->pfn), __entry->pfn,
                __entry->order, __entry->migratetype)
);
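
/*
 * External fragmentation event: an allocation could not be satisfied
 * from pageblocks of its own migratetype and fell back to another.
 * fragmenting is set when the fallback steals less than a full
 * pageblock, and change_ownership reports whether the pageblock's
 * migratetype now matches the allocation's.
 */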
TRACE_EVENT(mm_page_alloc_extfrag,

        TP_PROTO(struct page *page,
                int alloc_order, int fallback_order,
                int alloc_migratetype, int fallback_migratetype),

        TP_ARGS(page,
                alloc_order, fallback_order,
                alloc_migratetype, fallback_migratetype),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( int, alloc_order )
                __field( int, fallback_order )
                __field( int, alloc_migratetype )
                __field( int, fallback_migratetype )
                __field( int, change_ownership )
        ),

        TP_fast_assign(
                __entry->pfn = page_to_pfn(page);
                __entry->alloc_order = alloc_order;
                __entry->fallback_order = fallback_order;
                __entry->alloc_migratetype = alloc_migratetype;
                __entry->fallback_migratetype = fallback_migratetype;
                __entry->change_ownership = (alloc_migratetype ==
                                get_pageblock_migratetype(page));
        ),

        TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
                pfn_to_page(__entry->pfn),
                __entry->pfn,
                __entry->alloc_order,
                __entry->fallback_order,
                pageblock_order,
                __entry->alloc_migratetype,
                __entry->fallback_migratetype,
                __entry->fallback_order < pageblock_order,
                __entry->change_ownership)
);

#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>