/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
Li Zefand0b6e042009-07-13 10:33:21 +080013#undef TRACE_SYSTEM
14#define TRACE_SYSTEM kmem
15
Steven Rostedtea20d922009-04-10 08:54:16 -040016#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
Zhaolei02af61b2009-04-10 14:26:18 +080017#define _TRACE_KMEM_H
Eduard - Gabriel Munteanub9ce08c2008-08-10 20:14:03 +030018
19#include <linux/types.h>
Zhaoleifc182a42009-04-10 14:27:38 +080020#include <linux/tracepoint.h>
Vlastimil Babka420adbe92016-03-15 14:55:52 -070021#include <trace/events/mmflags.h>
Steven Rostedt62ba1802009-05-15 16:16:30 -040022
/*
 * kmem_alloc - event class for slab allocations (kmalloc, kmem_cache_alloc).
 * Records the caller address, the returned object pointer, the requested
 * vs. actually allocated size, and the GFP flags used.
 */
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	/* show_gfp_flags() decodes the gfp bitmask into symbolic names */
	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);

/* Fired on each kmalloc() that returns an object. */
DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

/* Fired on each kmem_cache_alloc() that returns an object. */
DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
72
/*
 * kmem_alloc_node - like kmem_alloc but for the NUMA-aware variants;
 * additionally records the NUMA node the allocation was requested on.
 */
DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);

/* Fired on each kmalloc_node() allocation. */
DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

/* Fired on each kmem_cache_alloc_node() allocation. */
DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
128
/*
 * kmem_free - event class for slab frees (kfree, kmem_cache_free).
 * Records only the caller address and the pointer being freed.
 */
DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

/* Fired on each kfree(). */
DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

/* Fired on each kmem_cache_free(). */
DEFINE_EVENT(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);
Mel Gorman4b4f2782009-09-21 17:02:41 -0700161
/*
 * mm_page_free - a page (of the given order) is returned to the page
 * allocator.  Only the pfn is stored; the struct page pointer is
 * reconstructed at print time via pfn_to_page().
 */
TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn	)
		__field(	unsigned int,	order	)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=%lu order=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);
183
/*
 * mm_page_free_batched - an order-0 page is freed via the batched
 * (per-cpu list) free path.  'cold' indicates a cache-cold free.
 */
TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page, int cold),

	TP_ARGS(page, cold),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn	)
		__field(	int,		cold	)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->cold		= cold;
	),

	/* order is always 0 on this path, hence the literal "order=0" */
	TP_printk("page=%p pfn=%lu order=0 cold=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->cold)
);
205
/*
 * mm_page_alloc - a page allocation completed (possibly with page == NULL
 * on failure; the pfn is then recorded as -1UL and printed as page=NULL,
 * pfn=0).
 */
TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		/* -1UL is the sentinel for a failed (NULL) allocation */
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->gfp_flags	= gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);
234
/*
 * mm_page - event class for zone-locked page allocator activity.
 * percpu_refill is reported as (order == 0) because only order-0 pages
 * are allocated through the per-cpu list refill path.
 */
DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		/* -1UL is the sentinel for a NULL page */
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);

/* Fired when a page is taken from the buddy lists under the zone lock. */
DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);
267
/*
 * mm_page_pcpu_drain - a page is drained from a per-cpu page list back
 * to the buddy allocator.
 */
TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		/* -1UL is the sentinel for a NULL page */
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);
290
/*
 * mm_page_alloc_extfrag - an allocation had to fall back to a pageblock
 * of a different migratetype, a source of external fragmentation.
 * change_ownership records whether the whole pageblock was converted to
 * the allocation's migratetype; fragmenting is derived at print time
 * from fallback_order < pageblock_order.
 */
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->pfn			= page_to_pfn(page);
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		/* true if the pageblock now belongs to the allocating type */
		__entry->change_ownership	= (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);
331
Patrick Dalyeeeb9402016-11-01 20:54:41 -0700332
/*
 * ion_alloc - event class bracketing an ION buffer allocation.
 * client_name and current_comm are copied into fixed-size arrays because
 * the source strings may not outlive the trace record; heap_name is kept
 * as a pointer (presumably heap names are persistent for the lifetime of
 * the system -- TODO confirm against the ION heap code).
 * The buffer pointer is printed with %pK so it is hidden from
 * unprivileged readers.
 */
DECLARE_EVENT_CLASS(ion_alloc,

	TP_PROTO(const char *client_name,
		 const char *heap_name,
		 size_t len,
		 unsigned int mask,
		 unsigned int flags,
		 pid_t client_pid,
		 char *current_comm,
		 pid_t current_pid,
		 void *buffer),

	TP_ARGS(client_name, heap_name, len, mask, flags, client_pid,
		current_comm, current_pid, buffer),

	TP_STRUCT__entry(
		__array(char, client_name, 64)
		__field(const char *, heap_name)
		__field(size_t, len)
		__field(unsigned int, mask)
		__field(unsigned int, flags)
		__field(pid_t, client_pid)
		__array(char, current_comm, 16)	/* TASK_COMM_LEN-sized */
		__field(pid_t, current_pid)
		__field(void *, buffer)
	),

	TP_fast_assign(
		strlcpy(__entry->client_name, client_name, 64);
		__entry->heap_name = heap_name;
		__entry->len = len;
		__entry->mask = mask;
		__entry->flags = flags;
		__entry->client_pid = client_pid;
		strlcpy(__entry->current_comm, current_comm, 16);
		__entry->current_pid = current_pid;
		__entry->buffer = buffer;
	),

	TP_printk("client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x "
		  "client_pid=%d current_comm=%s current_pid=%d "
		  "buffer=%pK",
		__entry->client_name,
		__entry->heap_name,
		__entry->len,
		__entry->mask,
		__entry->flags,
		__entry->client_pid,
		__entry->current_comm,
		__entry->current_pid,
		__entry->buffer)
);

/* Fired when an ION buffer allocation begins. */
DEFINE_EVENT(ion_alloc, ion_alloc_buffer_start,

	TP_PROTO(const char *client_name,
		 const char *heap_name,
		 size_t len,
		 unsigned int mask,
		 unsigned int flags,
		 pid_t client_pid,
		 char *current_comm,
		 pid_t current_pid,
		 void *buffer),

	TP_ARGS(client_name, heap_name, len, mask, flags, client_pid,
		current_comm, current_pid, buffer)
);

/* Fired when an ION buffer allocation completes. */
DEFINE_EVENT(ion_alloc, ion_alloc_buffer_end,

	TP_PROTO(const char *client_name,
		 const char *heap_name,
		 size_t len,
		 unsigned int mask,
		 unsigned int flags,
		 pid_t client_pid,
		 char *current_comm,
		 pid_t current_pid,
		 void *buffer),

	TP_ARGS(client_name, heap_name, len, mask, flags, client_pid,
		current_comm, current_pid, buffer)
);
417
/*
 * ion_free - event class for ION buffer frees.  Both name strings are
 * copied into the record (the client may be torn down before the trace
 * is read); the buffer pointer is printed with %pK.
 */
DECLARE_EVENT_CLASS(ion_free,

	TP_PROTO(const char *client_name,
		 pid_t client_pid,
		 char *current_comm,
		 pid_t current_pid,
		 void *buffer,
		 size_t size),

	TP_ARGS(client_name, client_pid, current_comm, current_pid,
		buffer, size),

	TP_STRUCT__entry(
		__array(char, client_name, 64)
		__field(pid_t, client_pid)
		__array(char, current_comm, 16)	/* TASK_COMM_LEN-sized */
		__field(pid_t, current_pid)
		__field(void *, buffer)
		__field(size_t, size)
	),

	TP_fast_assign(
		strlcpy(__entry->client_name, client_name, 64);
		__entry->client_pid = client_pid;
		strlcpy(__entry->current_comm, current_comm, 16);
		__entry->current_pid = current_pid;
		__entry->buffer = buffer;
		__entry->size = size;
	),

	TP_printk("client_name=%s client_pid=%d current_comm=%s "
		  "current_pid=%d buffer=%pK size=%zu",
		__entry->client_name,
		__entry->client_pid,
		__entry->current_comm,
		__entry->current_pid,
		__entry->buffer,
		__entry->size)
);

/* Fired when an ION buffer is freed. */
DEFINE_EVENT(ion_free, ion_free_buffer,

	TP_PROTO(const char *client_name,
		 pid_t client_pid,
		 char *current_comm,
		 pid_t current_pid,
		 void *buffer,
		 size_t size),

	TP_ARGS(client_name, client_pid, current_comm, current_pid,
		buffer, size)
);
470
471DECLARE_EVENT_CLASS(ion_alloc_error,
472
473 TP_PROTO(const char *client_name,
474 const char *heap_name,
475 size_t len,
476 unsigned int mask,
477 unsigned int flags,
478 long error),
479
480 TP_ARGS(client_name, heap_name, len, mask, flags, error),
481
482 TP_STRUCT__entry(
483 __field(const char *, client_name)
484 __field(const char *, heap_name)
485 __field(size_t, len)
486 __field(unsigned int, mask)
487 __field(unsigned int, flags)
488 __field(long, error)
489 ),
490
491 TP_fast_assign(
492 __entry->client_name = client_name;
493 __entry->heap_name = heap_name;
494 __entry->len = len;
495 __entry->mask = mask;
496 __entry->flags = flags;
497 __entry->error = error;
498 ),
499
500 TP_printk(
501 "client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x error=%ld",
502 __entry->client_name,
503 __entry->heap_name,
504 __entry->len,
505 __entry->mask,
506 __entry->flags,
507 __entry->error)
508);
509
510
511DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fallback,
512
513 TP_PROTO(const char *client_name,
514 const char *heap_name,
515 size_t len,
516 unsigned int mask,
517 unsigned int flags,
518 long error),
519
520 TP_ARGS(client_name, heap_name, len, mask, flags, error)
521);
522
523DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fail,
524
525 TP_PROTO(const char *client_name,
526 const char *heap_name,
527 size_t len,
528 unsigned int mask,
529 unsigned int flags,
530 long error),
531
532 TP_ARGS(client_name, heap_name, len, mask, flags, error)
533);
534
535
/*
 * alloc_retry - event class recording how many attempts an allocation
 * (or page migration) loop has made so far.
 */
DECLARE_EVENT_CLASS(alloc_retry,

	TP_PROTO(int tries),

	TP_ARGS(tries),

	TP_STRUCT__entry(
		__field(int, tries)
	),

	TP_fast_assign(
		__entry->tries = tries;
	),

	TP_printk("tries=%d",
		__entry->tries)
);

/* Fired on each retry of an ION content-protect heap allocation. */
DEFINE_EVENT(alloc_retry, ion_cp_alloc_retry,

	TP_PROTO(int tries),

	TP_ARGS(tries)
);

/* Fired on each retry of a page-migration attempt. */
DEFINE_EVENT(alloc_retry, migrate_retry,

	TP_PROTO(int tries),

	TP_ARGS(tries)
);

/* Fired on each retry of a CMA (contiguous DMA) allocation. */
DEFINE_EVENT(alloc_retry, dma_alloc_contiguous_retry,

	TP_PROTO(int tries),

	TP_ARGS(tries)
);
574
/*
 * migrate_pages - event class bracketing a migrate_pages() run.
 * 'mode' is the migration mode value passed by the caller (an enum
 * migrate_mode at the call sites -- recorded here as a plain int).
 */
DECLARE_EVENT_CLASS(migrate_pages,

	TP_PROTO(int mode),

	TP_ARGS(mode),

	TP_STRUCT__entry(
		__field(int, mode)
	),

	TP_fast_assign(
		__entry->mode = mode;
	),

	TP_printk("mode=%d",
		__entry->mode)
);

/* Fired when a page-migration pass starts. */
DEFINE_EVENT(migrate_pages, migrate_pages_start,

	TP_PROTO(int mode),

	TP_ARGS(mode)
);

/* Fired when a page-migration pass ends. */
DEFINE_EVENT(migrate_pages, migrate_pages_end,

	TP_PROTO(int mode),

	TP_ARGS(mode)
);
606
/*
 * ion_alloc_pages - event class for ION system/iommu heap page
 * allocations, recording the GFP flags and allocation order.
 */
DECLARE_EVENT_CLASS(ion_alloc_pages,

	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order),

	TP_STRUCT__entry(
		__field(gfp_t, gfp_flags)
		__field(unsigned int, order)
	),

	TP_fast_assign(
		__entry->gfp_flags = gfp_flags;
		__entry->order = order;
	),

	TP_printk("gfp_flags=%s order=%d",
		show_gfp_flags(__entry->gfp_flags),
		__entry->order)
	);

/* Start of a page allocation for the ION iommu heap. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_start,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);

/* Successful end of a page allocation for the ION iommu heap. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_end,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);

/* Failed page allocation for the ION iommu heap. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_fail,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);

/* Start of a page allocation for the ION system heap. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_start,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);

/* Successful end of a page allocation for the ION system heap. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_end,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)
	);

/* Failed page allocation for the ION system heap. */
DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_fail,
	TP_PROTO(gfp_t gfp_flags,
		 unsigned int order),

	TP_ARGS(gfp_flags, order)

	);
671
/*
 * smmu_map - event class for SMMU/IOMMU range mappings: virtual address,
 * physical address, page-table chunk size used, and total length.
 * The physical address is printed with %pa (correct for phys_addr_t on
 * both 32- and 64-bit kernels).
 */
DECLARE_EVENT_CLASS(smmu_map,

	TP_PROTO(unsigned long va,
		 phys_addr_t pa,
		 unsigned long chunk_size,
		 size_t len),

	TP_ARGS(va, pa, chunk_size, len),

	TP_STRUCT__entry(
		__field(unsigned long, va)
		__field(phys_addr_t, pa)
		__field(unsigned long, chunk_size)
		__field(size_t, len)
	),

	TP_fast_assign(
		__entry->va = va;
		__entry->pa = pa;
		__entry->chunk_size = chunk_size;
		__entry->len = len;
	),

	TP_printk("v_addr=%p p_addr=%pa chunk_size=0x%lx len=%zu",
		(void *)__entry->va,
		&__entry->pa,
		__entry->chunk_size,
		__entry->len)
	);

/* Fired for each iommu_map_range() call. */
DEFINE_EVENT(smmu_map, iommu_map_range,
	TP_PROTO(unsigned long va,
		 phys_addr_t pa,
		 unsigned long chunk_size,
		 size_t len),

	TP_ARGS(va, pa, chunk_size, len)
	);
710
/*
 * ion_secure_cma_add_to_pool - event class bracketing the addition of
 * memory to the secure CMA pool.  is_prefetch distinguishes prefetch
 * refills from demand refills.
 */
DECLARE_EVENT_CLASS(ion_secure_cma_add_to_pool,

	TP_PROTO(unsigned long len,
		 int pool_total,
		 bool is_prefetch),

	TP_ARGS(len, pool_total, is_prefetch),

	TP_STRUCT__entry(
		__field(unsigned long, len)
		__field(int, pool_total)
		__field(bool, is_prefetch)
	),

	TP_fast_assign(
		__entry->len = len;
		__entry->pool_total = pool_total;
		__entry->is_prefetch = is_prefetch;
	),

	TP_printk("len %lx, pool total %x is_prefetch %d",
		__entry->len,
		__entry->pool_total,
		__entry->is_prefetch)
	);

/* Fired when adding to the secure CMA pool begins. */
DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_start,
	TP_PROTO(unsigned long len,
		 int pool_total,
		 bool is_prefetch),

	TP_ARGS(len, pool_total, is_prefetch)
	);

/* Fired when adding to the secure CMA pool completes. */
DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_end,
	TP_PROTO(unsigned long len,
		 int pool_total,
		 bool is_prefetch),

	TP_ARGS(len, pool_total, is_prefetch)
	);
752
/*
 * ion_secure_cma_shrink_pool - event class bracketing a shrink of the
 * secure CMA pool: how much was drained and how much was skipped.
 */
DECLARE_EVENT_CLASS(ion_secure_cma_shrink_pool,

	TP_PROTO(unsigned long drained_size,
		 unsigned long skipped_size),

	TP_ARGS(drained_size, skipped_size),

	TP_STRUCT__entry(
		__field(unsigned long, drained_size)
		__field(unsigned long, skipped_size)
	),

	TP_fast_assign(
		__entry->drained_size = drained_size;
		__entry->skipped_size = skipped_size;
	),

	TP_printk("drained size %lx, skipped size %lx",
		__entry->drained_size,
		__entry->skipped_size)
	);

/* Fired when a pool shrink begins. */
DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_start,
	TP_PROTO(unsigned long drained_size,
		 unsigned long skipped_size),

	TP_ARGS(drained_size, skipped_size)
	);

/* Fired when a pool shrink completes. */
DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_end,
	TP_PROTO(unsigned long drained_size,
		 unsigned long skipped_size),

	TP_ARGS(drained_size, skipped_size)
	);
788
/* ion_prefetching - records the size of an ION pool prefetch request. */
TRACE_EVENT(ion_prefetching,

	TP_PROTO(unsigned long len),

	TP_ARGS(len),

	TP_STRUCT__entry(
		__field(unsigned long, len)
	),

	TP_fast_assign(
		__entry->len = len;
	),

	TP_printk("prefetch size %lx",
		__entry->len)
	);
806
/*
 * ion_secure_cma_allocate - event class bracketing a secure CMA heap
 * allocation.  heap_name is stored as a raw pointer; presumably heap
 * names are persistent for the lifetime of the system -- TODO confirm.
 */
DECLARE_EVENT_CLASS(ion_secure_cma_allocate,

	TP_PROTO(const char *heap_name,
		 unsigned long len,
		 unsigned long align,
		 unsigned long flags),

	TP_ARGS(heap_name, len, align, flags),

	TP_STRUCT__entry(
		__field(const char *, heap_name)
		__field(unsigned long, len)
		__field(unsigned long, align)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		__entry->heap_name = heap_name;
		__entry->len = len;
		__entry->align = align;
		__entry->flags = flags;
	),

	TP_printk("heap_name=%s len=%lx align=%lx flags=%lx",
		__entry->heap_name,
		__entry->len,
		__entry->align,
		__entry->flags)
	);

/* Fired when a secure CMA allocation begins. */
DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_start,
	TP_PROTO(const char *heap_name,
		 unsigned long len,
		 unsigned long align,
		 unsigned long flags),

	TP_ARGS(heap_name, len, align, flags)
	);

/* Fired when a secure CMA allocation completes. */
DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_end,
	TP_PROTO(const char *heap_name,
		 unsigned long len,
		 unsigned long align,
		 unsigned long flags),

	TP_ARGS(heap_name, len, align, flags)
	);
854
/*
 * ion_cp_secure_buffer - event class bracketing the securing of a
 * content-protect buffer.  Same record layout as ion_secure_cma_allocate;
 * heap_name is stored as a raw pointer (presumably persistent -- TODO
 * confirm).
 */
DECLARE_EVENT_CLASS(ion_cp_secure_buffer,

	TP_PROTO(const char *heap_name,
		 unsigned long len,
		 unsigned long align,
		 unsigned long flags),

	TP_ARGS(heap_name, len, align, flags),

	TP_STRUCT__entry(
		__field(const char *, heap_name)
		__field(unsigned long, len)
		__field(unsigned long, align)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		__entry->heap_name = heap_name;
		__entry->len = len;
		__entry->align = align;
		__entry->flags = flags;
	),

	TP_printk("heap_name=%s len=%lx align=%lx flags=%lx",
		__entry->heap_name,
		__entry->len,
		__entry->align,
		__entry->flags)
	);

/* Fired when securing a content-protect buffer begins. */
DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_start,
	TP_PROTO(const char *heap_name,
		 unsigned long len,
		 unsigned long align,
		 unsigned long flags),

	TP_ARGS(heap_name, len, align, flags)
	);

/* Fired when securing a content-protect buffer completes. */
DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_end,
	TP_PROTO(const char *heap_name,
		 unsigned long len,
		 unsigned long align,
		 unsigned long flags),

	TP_ARGS(heap_name, len, align, flags)
	);
902
/*
 * iommu_sec_ptbl_map_range - event class bracketing a secure IOMMU
 * page-table range mapping for a given security domain (sec_id).
 *
 * NOTE(review): 'pa' is recorded as unsigned int, which would truncate
 * physical addresses above 4GB; the smmu_map class above uses
 * phys_addr_t.  Presumably the secure-ptbl callers only pass 32-bit
 * addresses -- TODO confirm against the callers before changing the
 * record layout.
 */
DECLARE_EVENT_CLASS(iommu_sec_ptbl_map_range,

	TP_PROTO(int sec_id,
		 int num,
		 unsigned long va,
		 unsigned int pa,
		 size_t len),

	TP_ARGS(sec_id, num, va, pa, len),

	TP_STRUCT__entry(
		__field(int, sec_id)
		__field(int, num)
		__field(unsigned long, va)
		__field(unsigned int, pa)
		__field(size_t, len)
	),

	TP_fast_assign(
		__entry->sec_id = sec_id;
		__entry->num = num;
		__entry->va = va;
		__entry->pa = pa;
		__entry->len = len;
	),

	TP_printk("sec_id=%d num=%d va=%lx pa=%u len=%zu",
		__entry->sec_id,
		__entry->num,
		__entry->va,
		__entry->pa,
		__entry->len)
	);

/* Fired when a secure page-table range mapping begins. */
DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_start,

	TP_PROTO(int sec_id,
		 int num,
		 unsigned long va,
		 unsigned int pa,
		 size_t len),

	TP_ARGS(sec_id, num, va, pa, len)
	);

/* Fired when a secure page-table range mapping completes. */
DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_end,

	TP_PROTO(int sec_id,
		 int num,
		 unsigned long va,
		 unsigned int pa,
		 size_t len),

	TP_ARGS(sec_id, num, va, pa, len)
	);
Steven Rostedta8d154b2009-04-10 09:36:00 -0400958#endif /* _TRACE_KMEM_H */
Steven Rostedtea20d922009-04-10 08:54:16 -0400959
Steven Rostedta8d154b2009-04-10 09:36:00 -0400960/* This part must be outside protection */
961#include <trace/define_trace.h>