blob: 0393a7f3fa35c5dc4a169f7912b84f5b8a872e92 [file] [log] [blame]
Li Zefanba77c9e2009-11-20 15:53:25 +08001#include "builtin.h"
2#include "perf.h"
3
Arnaldo Carvalho de Melo0f7d2f12012-09-24 10:46:54 -03004#include "util/evlist.h"
Arnaldo Carvalho de Melofcf65bf2012-08-07 09:58:03 -03005#include "util/evsel.h"
Li Zefanba77c9e2009-11-20 15:53:25 +08006#include "util/util.h"
7#include "util/cache.h"
8#include "util/symbol.h"
9#include "util/thread.h"
10#include "util/header.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020011#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020012#include "util/tool.h"
Namhyung Kimc9758cc2015-04-21 13:55:02 +090013#include "util/callchain.h"
Li Zefanba77c9e2009-11-20 15:53:25 +080014
15#include "util/parse-options.h"
16#include "util/trace-event.h"
Jiri Olsaf5fc1412013-10-15 16:27:32 +020017#include "util/data.h"
Don Zickus4b627952014-04-07 14:55:23 -040018#include "util/cpumap.h"
Li Zefanba77c9e2009-11-20 15:53:25 +080019
20#include "util/debug.h"
Li Zefanba77c9e2009-11-20 15:53:25 +080021
22#include <linux/rbtree.h>
Arnaldo Carvalho de Melo8d9233f2013-01-24 22:24:57 -030023#include <linux/string.h>
Namhyung Kim77cfe382015-03-23 15:30:40 +090024#include <locale.h>
Namhyung Kimc9758cc2015-04-21 13:55:02 +090025#include <regex.h>
Li Zefanba77c9e2009-11-20 15:53:25 +080026
/* Mode flags: which analysis the user requested (slab and/or page). */
static int kmem_slab;
static int kmem_page;

/* System page size, cached once; used to convert page orders to bytes. */
static long kmem_page_size;

struct alloc_stat;
/* Comparator used by the sort machinery; operands are alloc_stat/page_stat. */
typedef int (*sort_fn_t)(void *, void *);

/* Output selection: print per-allocation and/or per-caller tables. */
static int alloc_flag;
static int caller_flag;

/* Line limits for the result tables; -1 means unlimited. */
static int alloc_lines = -1;
static int caller_lines = -1;

/* When set, print raw callsite addresses instead of resolved symbols. */
static bool raw_ip;
/*
 * Per-pointer (or per-callsite) slab allocation statistics, kept in
 * rb-trees keyed either by 'ptr' or by 'call_site'.
 */
struct alloc_stat {
	u64 call_site;		/* kernel address of the allocation site */
	u64 ptr;		/* address returned by the allocator */
	u64 bytes_req;		/* total bytes requested by callers */
	u64 bytes_alloc;	/* total bytes actually allocated (>= req) */
	u32 hit;		/* number of allocation events folded in */
	u32 pingpong;		/* frees that happened on a different CPU */

	short alloc_cpu;	/* CPU of the last allocation; -1 after free */

	struct rb_node node;
};

/* Live trees (keyed by ptr / call_site) and their sorted counterparts. */
static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

/* Slab summary counters accumulated while processing samples. */
static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;
/*
 * Fold one slab allocation into root_alloc_stat, keyed by the returned
 * pointer.  An existing node accumulates; otherwise a new node is
 * allocated and linked.  Returns 0 on success, -1 on allocation failure.
 */
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	/* Standard rbtree descent; 'data' holds the last visited node. */
	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		/* Same pointer re-allocated: accumulate the counters. */
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	/* Always refresh: the latest event owns the callsite and CPU. */
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	return 0;
}
106
Arnaldo Carvalho de Melo2814eb02012-09-08 22:53:06 -0300107static int insert_caller_stat(unsigned long call_site,
Li Zefanba77c9e2009-11-20 15:53:25 +0800108 int bytes_req, int bytes_alloc)
109{
110 struct rb_node **node = &root_caller_stat.rb_node;
111 struct rb_node *parent = NULL;
112 struct alloc_stat *data = NULL;
113
Li Zefanba77c9e2009-11-20 15:53:25 +0800114 while (*node) {
115 parent = *node;
116 data = rb_entry(*node, struct alloc_stat, node);
117
118 if (call_site > data->call_site)
119 node = &(*node)->rb_right;
120 else if (call_site < data->call_site)
121 node = &(*node)->rb_left;
122 else
123 break;
124 }
125
126 if (data && data->call_site == call_site) {
127 data->hit++;
128 data->bytes_req += bytes_req;
Wenji Huang4efb5292009-12-21 17:52:55 +0800129 data->bytes_alloc += bytes_alloc;
Li Zefanba77c9e2009-11-20 15:53:25 +0800130 } else {
131 data = malloc(sizeof(*data));
Arnaldo Carvalho de Melo2814eb02012-09-08 22:53:06 -0300132 if (!data) {
133 pr_err("%s: malloc failed\n", __func__);
134 return -1;
135 }
Li Zefanba77c9e2009-11-20 15:53:25 +0800136 data->call_site = call_site;
Li Zefan079d3f62009-11-24 13:26:55 +0800137 data->pingpong = 0;
Li Zefanba77c9e2009-11-20 15:53:25 +0800138 data->hit = 1;
139 data->bytes_req = bytes_req;
140 data->bytes_alloc = bytes_alloc;
141
142 rb_link_node(&data->node, parent, node);
143 rb_insert_color(&data->node, &root_caller_stat);
144 }
Arnaldo Carvalho de Melo2814eb02012-09-08 22:53:06 -0300145
146 return 0;
Li Zefanba77c9e2009-11-20 15:53:25 +0800147}
148
/*
 * Handler for kmem:kmalloc / kmem:kmem_cache_alloc tracepoints.
 * Extracts the tracepoint fields and updates both the per-pointer and
 * per-caller slab trees plus the global totals.
 */
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
		      call_site = perf_evsel__intval(evsel, sample, "call_site");
	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	nr_allocs++;
	return 0;
}
167
168static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
169 struct perf_sample *sample)
170{
171 int ret = perf_evsel__process_alloc_event(evsel, sample);
172
173 if (!ret) {
Don Zickus4b627952014-04-07 14:55:23 -0400174 int node1 = cpu__get_node(sample->cpu),
Arnaldo Carvalho de Melo0f7d2f12012-09-24 10:46:54 -0300175 node2 = perf_evsel__intval(evsel, sample, "node");
176
Li Zefan7d0d3942009-11-24 13:26:31 +0800177 if (node1 != node2)
178 nr_cross_allocs++;
179 }
Arnaldo Carvalho de Melo0f7d2f12012-09-24 10:46:54 -0300180
181 return ret;
Li Zefanba77c9e2009-11-20 15:53:25 +0800182}
183
/* Defined later with the rest of the sort comparators. */
static int ptr_cmp(void *, void *);
static int slab_callsite_cmp(void *, void *);

/*
 * Look up an alloc_stat in 'root' using 'sort_fn' as the ordering.
 * The key carries both ptr and call_site; the comparator decides which
 * field is relevant.  Returns the node or NULL when not found.
 */
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}
211
/*
 * Handler for kmem:kfree / kmem:kmem_cache_free.  Detects "ping-pong"
 * frees, i.e. a free executed on a different CPU than the matching
 * allocation, and charges it to both the pointer and its callsite.
 */
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;	/* free without a recorded alloc: ignore */

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat,
					     slab_callsite_cmp);
		if (!s_caller)
			return -1;	/* tree inconsistency: hard error */
		s_caller->pingpong++;
	}
	/* Mark the pointer as freed so a re-alloc isn't a false ping-pong. */
	s_alloc->alloc_cpu = -1;

	return 0;
}
236
/* Page allocator summary counters (bytes and event counts). */
static u64 total_page_alloc_bytes;
static u64 total_page_free_bytes;
static u64 total_page_nomatch_bytes;	/* frees with no matching alloc */
static u64 total_page_fail_bytes;	/* failed allocations */
static unsigned long nr_page_allocs;
static unsigned long nr_page_frees;
static unsigned long nr_page_fails;
static unsigned long nr_page_nomatch;

/* Tracepoint exposes 'pfn' on newer kernels, 'page' on older ones. */
static bool use_pfn;
static struct perf_session *kmem_session;

#define MAX_MIGRATE_TYPES 6
#define MAX_PAGE_ORDER 11

/* Histogram of allocations by page order and migrate type. */
static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
253
/*
 * Page allocator statistics node.  The same struct serves three trees:
 * page_tree (live pages keyed by page/pfn), page_alloc_tree and
 * page_caller_tree (aggregated, keyed by the active sort dimensions).
 */
struct page_stat {
	struct rb_node node;
	u64 page;		/* pfn or struct-page address */
	u64 callsite;		/* resolved allocation callsite */
	int order;		/* allocation order */
	unsigned gfp_flags;
	unsigned migrate_type;
	u64 alloc_bytes;
	u64 free_bytes;
	int nr_alloc;
	int nr_free;
};

/* Live pages, aggregation trees, and their sorted output counterparts. */
static struct rb_root page_tree;
static struct rb_root page_alloc_tree;
static struct rb_root page_alloc_sorted;
static struct rb_root page_caller_tree;
static struct rb_root page_caller_sorted;
Namhyung Kim0d68bc92015-04-06 14:36:10 +0900272
/* One kernel memory-allocation function: its address range and name. */
struct alloc_func {
	u64 start;
	u64 end;
	char *name;
};

/* Sorted array of allocator entry points, built lazily from kallsyms. */
static int nr_alloc_funcs;
static struct alloc_func *alloc_func_list;
281
282static int funcmp(const void *a, const void *b)
283{
284 const struct alloc_func *fa = a;
285 const struct alloc_func *fb = b;
286
287 if (fa->start > fb->start)
288 return 1;
289 else
290 return -1;
291}
292
/*
 * bsearch() comparator: does key 'a' (start == end == one IP) fall
 * inside allocator function 'b'?  Matching the half-open range
 * [b->start, b->end) returns 0; otherwise order by start address so
 * bsearch() can bisect the sorted alloc_func_list.
 */
static int callcmp(const void *a, const void *b)
{
	const struct alloc_func *fa = a;
	const struct alloc_func *fb = b;

	/* IP inside this function's address range: it is an allocator. */
	if (fb->start <= fa->start && fa->end < fb->end)
		return 0;

	if (fa->start > fb->start)
		return 1;
	else
		return -1;
}
306
307static int build_alloc_func_list(void)
308{
309 int ret;
310 struct map *kernel_map;
311 struct symbol *sym;
312 struct rb_node *node;
313 struct alloc_func *func;
314 struct machine *machine = &kmem_session->machines.host;
315 regex_t alloc_func_regex;
316 const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
317
318 ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
319 if (ret) {
320 char err[BUFSIZ];
321
322 regerror(ret, &alloc_func_regex, err, sizeof(err));
323 pr_err("Invalid regex: %s\n%s", pattern, err);
324 return -EINVAL;
325 }
326
327 kernel_map = machine->vmlinux_maps[MAP__FUNCTION];
328 if (map__load(kernel_map, NULL) < 0) {
329 pr_err("cannot load kernel map\n");
330 return -ENOENT;
331 }
332
333 map__for_each_symbol(kernel_map, sym, node) {
334 if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
335 continue;
336
337 func = realloc(alloc_func_list,
338 (nr_alloc_funcs + 1) * sizeof(*func));
339 if (func == NULL)
340 return -ENOMEM;
341
342 pr_debug("alloc func: %s\n", sym->name);
343 func[nr_alloc_funcs].start = sym->start;
344 func[nr_alloc_funcs].end = sym->end;
345 func[nr_alloc_funcs].name = sym->name;
346
347 alloc_func_list = func;
348 nr_alloc_funcs++;
349 }
350
351 qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
352
353 regfree(&alloc_func_regex);
354 return 0;
355}
356
/*
 * Find first non-memory allocation function from callchain.
 * The allocation functions are in the 'alloc_func_list'.
 *
 * Walks the resolved callchain outward and skips frames whose IP lies
 * inside a known allocator function; the first frame outside that set
 * is the real callsite.  Falls back to sample->ip when the list cannot
 * be built or no frame qualifies.
 */
static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample)
{
	struct addr_location al;
	struct machine *machine = &kmem_session->machines.host;
	struct callchain_cursor_node *node;

	/* Lazily build the allocator-function table on first use. */
	if (alloc_func_list == NULL) {
		if (build_alloc_func_list() < 0)
			goto out;
	}

	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	sample__resolve_callchain(sample, NULL, evsel, &al, 16);

	callchain_cursor_commit(&callchain_cursor);
	while (true) {
		struct alloc_func key, *caller;
		u64 addr;

		node = callchain_cursor_current(&callchain_cursor);
		if (node == NULL)
			break;

		/* Point query: is this IP inside any allocator function? */
		key.start = key.end = node->ip;
		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
				 sizeof(key), callcmp);
		if (!caller) {
			/* found */
			if (node->map)
				addr = map__unmap_ip(node->map, node->ip);
			else
				addr = node->ip;

			return addr;
		} else
			pr_debug3("skipping alloc function: %s\n", caller->name);

		callchain_cursor_advance(&callchain_cursor);
	}

out:
	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
	return sample->ip;
}
405
406static struct page_stat *
407__page_stat__findnew_page(u64 page, bool create)
Namhyung Kim0d68bc92015-04-06 14:36:10 +0900408{
409 struct rb_node **node = &page_tree.rb_node;
410 struct rb_node *parent = NULL;
411 struct page_stat *data;
412
413 while (*node) {
414 s64 cmp;
415
416 parent = *node;
417 data = rb_entry(*node, struct page_stat, node);
418
419 cmp = data->page - page;
420 if (cmp < 0)
421 node = &parent->rb_left;
422 else if (cmp > 0)
423 node = &parent->rb_right;
424 else
425 return data;
426 }
427
428 if (!create)
429 return NULL;
430
431 data = zalloc(sizeof(*data));
432 if (data != NULL) {
433 data->page = page;
434
435 rb_link_node(&data->node, parent, node);
436 rb_insert_color(&data->node, &page_tree);
437 }
438
439 return data;
440}
441
/* Lookup-only wrapper: NULL when the page has no live node. */
static struct page_stat *page_stat__find_page(u64 page)
{
	return __page_stat__findnew_page(page, false);
}

/* Lookup-or-create wrapper used on the allocation path. */
static struct page_stat *page_stat__findnew_page(u64 page)
{
	return __page_stat__findnew_page(page, true);
}
451
/* One user-selectable sort key, chained into a sort-input list. */
struct sort_dimension {
	const char name[20];
	sort_fn_t cmp;
	struct list_head list;
};

/* Active sort keys for the page-alloc and page-caller trees. */
static LIST_HEAD(page_alloc_sort_input);
static LIST_HEAD(page_caller_sort_input);
Namhyung Kim0d68bc92015-04-06 14:36:10 +0900460
/*
 * Look up (or, when 'create' is set, insert) the aggregation node for
 * 'pstat' in page_alloc_tree.  Ordering is defined by the chained
 * comparators in page_alloc_sort_input, so equality means "same bucket
 * under the active sort keys".
 */
static struct page_stat *
__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_alloc_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;
	struct sort_dimension *sort;

	while (*node) {
		int cmp = 0;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		/* First non-zero comparator result decides the direction. */
		list_for_each_entry(sort, &page_alloc_sort_input, list) {
			cmp = sort->cmp(pstat, data);
			if (cmp)
				break;
		}

		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		/* Copy the identifying fields; counters start at zero. */
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_alloc_tree);
	}

	return data;
}

/* Lookup-only wrapper used on the free path. */
static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
{
	return __page_stat__findnew_alloc(pstat, false);
}

/* Lookup-or-create wrapper used on the allocation path. */
static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
{
	return __page_stat__findnew_alloc(pstat, true);
}
515
/*
 * Same as __page_stat__findnew_alloc() but for the per-callsite tree,
 * ordered by the comparators in page_caller_sort_input and keyed (in
 * the copied fields) by callsite rather than page.
 */
static struct page_stat *
__page_stat__findnew_caller(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_caller_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;
	struct sort_dimension *sort;

	while (*node) {
		int cmp = 0;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		/* First non-zero comparator result decides the direction. */
		list_for_each_entry(sort, &page_caller_sort_input, list) {
			cmp = sort->cmp(pstat, data);
			if (cmp)
				break;
		}

		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		/* Copy the identifying fields; counters start at zero. */
		data->callsite = pstat->callsite;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_caller_tree);
	}

	return data;
}

/* Lookup-only wrapper used on the free path. */
static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
{
	return __page_stat__findnew_caller(pstat, false);
}

/* Lookup-or-create wrapper used on the allocation path. */
static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
{
	return __page_stat__findnew_caller(pstat, true);
}
570
/*
 * A failed page allocation reports pfn == -1 (pfn mode) or a NULL
 * struct page pointer (page mode); treat those as invalid.
 *
 * NOTE(review): '-1UL' is only 32 bits wide on a 32-bit perf build
 * while pfn_or_page is u64, so a 64-bit all-ones pfn would not match
 * there; '(u64)-1' may be intended — confirm against the tracepoint's
 * field width before changing.
 */
static bool valid_page(u64 pfn_or_page)
{
	if (use_pfn && pfn_or_page == -1UL)
		return false;
	if (!use_pfn && pfn_or_page == 0)
		return false;
	return true;
}
579
/*
 * Handler for kmem:mm_page_alloc.  Updates three structures: the
 * live-page tree (so the matching free can recover gfp/migrate/callsite),
 * the per-page aggregation tree, and the per-callsite aggregation tree,
 * plus the order/migrate-type histogram and the global byte counters.
 */
static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	u64 page;
	unsigned int order = perf_evsel__intval(evsel, sample, "order");
	unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
	unsigned int migrate_type = perf_evsel__intval(evsel, sample,
						       "migratetype");
	u64 bytes = kmem_page_size << order;
	u64 callsite;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
		.gfp_flags = gfp_flags,
		.migrate_type = migrate_type,
	};

	/* Field name depends on kernel version; use_pfn was probed earlier. */
	if (use_pfn)
		page = perf_evsel__intval(evsel, sample, "pfn");
	else
		page = perf_evsel__intval(evsel, sample, "page");

	nr_page_allocs++;
	total_page_alloc_bytes += bytes;

	if (!valid_page(page)) {
		/* Allocation failed: count it and stop here. */
		nr_page_fails++;
		total_page_fail_bytes += bytes;

		return 0;
	}

	callsite = find_callsite(evsel, sample);

	/*
	 * This is to find the current page (with correct gfp flags and
	 * migrate type) at free event.
	 */
	pstat = page_stat__findnew_page(page);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->order = order;
	pstat->gfp_flags = gfp_flags;
	pstat->migrate_type = migrate_type;
	pstat->callsite = callsite;

	/* Accumulate into the per-page aggregation tree. */
	this.page = page;
	pstat = page_stat__findnew_alloc(&this);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;
	pstat->callsite = callsite;

	/* Accumulate into the per-callsite aggregation tree. */
	this.callsite = callsite;
	pstat = page_stat__findnew_caller(&this);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;

	order_stats[order][migrate_type]++;

	return 0;
}
648
/*
 * Handler for kmem:mm_page_free.  Recovers the allocation context
 * (gfp flags, migrate type, callsite) from the live-page tree, drops
 * the live node, and charges the free to both aggregation trees.
 */
static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
					       struct perf_sample *sample)
{
	u64 page;
	unsigned int order = perf_evsel__intval(evsel, sample, "order");
	u64 bytes = kmem_page_size << order;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
	};

	if (use_pfn)
		page = perf_evsel__intval(evsel, sample, "pfn");
	else
		page = perf_evsel__intval(evsel, sample, "page");

	nr_page_frees++;
	total_page_free_bytes += bytes;

	/* Free without a recorded allocation (e.g. allocated pre-record). */
	pstat = page_stat__find_page(page);
	if (pstat == NULL) {
		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
			  page, order);

		nr_page_nomatch++;
		total_page_nomatch_bytes += bytes;

		return 0;
	}

	/* Rebuild the aggregation key from the allocation-time context. */
	this.page = page;
	this.gfp_flags = pstat->gfp_flags;
	this.migrate_type = pstat->migrate_type;
	this.callsite = pstat->callsite;

	/* Page is no longer live. */
	rb_erase(&pstat->node, &page_tree);
	free(pstat);

	pstat = page_stat__find_alloc(&this);
	if (pstat == NULL)
		return -ENOENT;

	pstat->nr_free++;
	pstat->free_bytes += bytes;

	pstat = page_stat__find_caller(&this);
	if (pstat == NULL)
		return -ENOENT;

	pstat->nr_free++;
	pstat->free_bytes += bytes;

	return 0;
}
703
/* Per-tracepoint handler signature stashed in evsel->handler. */
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

/*
 * Top-level sample dispatcher: resolves the thread for bookkeeping,
 * then forwards the sample to the handler registered on the evsel
 * (one of the perf_evsel__process_* functions above), if any.
 */
static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(evsel, sample);
	}

	return 0;
}
731
/*
 * Session callbacks: samples go through our dispatcher; comm/mmap
 * events maintain thread and map state; ordered_events keeps samples
 * time-ordered so alloc events are seen before their frees.
 */
static struct perf_tool perf_kmem = {
	.sample		 = process_sample_event,
	.comm		 = perf_event__process_comm,
	.mmap		 = perf_event__process_mmap,
	.mmap2		 = perf_event__process_mmap2,
	.ordered_events	 = true,
};
739
/*
 * Internal fragmentation as a percentage: the share of allocated bytes
 * that callers never asked for.  Returns 0.0 when nothing was allocated
 * to avoid dividing by zero.
 */
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	double wasted_pct;

	if (n_alloc == 0)
		return 0.0;

	wasted_pct = 100.0 - (100.0 * n_req / n_alloc);
	return wasted_pct;
}
747
/*
 * Print one slab result table (callsite or pointer view) from a sorted
 * tree, limited to n_lines rows (-1 means unlimited, and prints a
 * trailing ellipsis row since the counter then never reaches -1 via
 * the loop).
 */
static void __print_slab_result(struct rb_root *root,
				struct perf_session *session,
				int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.105s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.105s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		/* Resolve callsites to symbols unless raw addresses asked. */
		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.105s\n", graph_dotted_line);
}
801
/* Printable names indexed by migrate type (see MAX_MIGRATE_TYPES). */
static const char * const migrate_type_str[] = {
	"UNMOVABL",
	"RECLAIM",
	"MOVABLE",
	"RESERVED",
	"CMA/ISLT",
	"UNKNOWN",
};
810
/*
 * Print the per-page allocation table from page_alloc_sorted, limited
 * to n_lines rows (-1 means unlimited).  The first column is either a
 * decimal PFN or a hex struct-page address depending on use_pfn.
 */
static void __print_page_alloc_result(struct perf_session *session, int n_lines)
{
	struct rb_node *next = rb_first(&page_alloc_sorted);
	struct machine *machine = &session->machines.host;
	const char *format;

	printf("\n%.105s\n", graph_dotted_line);
	printf(" %-16s | Total alloc (KB) | Hits | Order | Mig.type | GFP flags | Callsite\n",
	       use_pfn ? "PFN" : "Page");
	printf("%.105s\n", graph_dotted_line);

	if (use_pfn)
		format = " %16llu | %'16llu | %'9d | %5d | %8s | %08lx | %s\n";
	else
		format = " %016llx | %'16llu | %'9d | %5d | %8s | %08lx | %s\n";

	while (next && n_lines--) {
		struct page_stat *data;
		struct symbol *sym;
		struct map *map;
		char buf[32];
		char *caller = buf;

		data = rb_entry(next, struct page_stat, node);
		/* Show the callsite symbol, or its raw address as fallback. */
		sym = machine__find_kernel_function(machine, data->callsite,
						    &map, NULL);
		if (sym && sym->name)
			caller = sym->name;
		else
			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

		printf(format, (unsigned long long)data->page,
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       (unsigned long)data->gfp_flags, caller);

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... | ...\n");

	printf("%.105s\n", graph_dotted_line);
}
856
/*
 * Print the sorted per-callsite page allocation table: one row per
 * allocation callsite with total KB, hits, order, migrate type and
 * GFP flags.
 *
 * @session: session whose host machine is used for symbol resolution
 * @n_lines: max rows to print; -1 means unlimited
 */
static void __print_page_caller_result(struct perf_session *session, int n_lines)
{
	struct rb_node *next = rb_first(&page_caller_sorted);
	struct machine *machine = &session->machines.host;

	printf("\n%.105s\n", graph_dotted_line);
	printf(" Total alloc (KB) | Hits | Order | Mig.type | GFP flags | Callsite\n");
	printf("%.105s\n", graph_dotted_line);

	while (next && n_lines--) {
		struct page_stat *data;
		struct symbol *sym;
		struct map *map;
		char buf[32];
		char *caller = buf;

		data = rb_entry(next, struct page_stat, node);
		sym = machine__find_kernel_function(machine, data->callsite,
						    &map, NULL);
		/* fall back to the raw callsite address if no symbol resolves */
		if (sym && sym->name)
			caller = sym->name;
		else
			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

		printf(" %'16llu | %'9d | %5d | %8s | %08lx | %s\n",
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       (unsigned long)data->gfp_flags, caller);

		next = rb_next(next);
	}

	/* n_lines underflows to -1 exactly when the line budget ran out */
	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ...\n");

	printf("%.105s\n", graph_dotted_line);
}
895
/*
 * Print aggregate SLAB statistics: requested vs. allocated byte totals,
 * internal fragmentation, and cross-CPU allocation counts.
 * Uses %' (thousands grouping), enabled by setlocale() in cmd_kmem().
 */
static void print_slab_summary(void)
{
	printf("\nSUMMARY (SLAB allocator)");
	printf("\n========================\n");
	printf("Total bytes requested: %'lu\n", total_requested);
	printf("Total bytes allocated: %'lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %'lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
}
908
/*
 * Print aggregate page allocator statistics: alloc/free request totals,
 * how many allocations were matched by a free within the trace, failure
 * counts, and an order x migrate-type histogram of allocations.
 */
static void print_page_summary(void)
{
	int o, m;
	/* frees that matched a recorded allocation */
	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;

	printf("\nSUMMARY (page allocator)");
	printf("\n========================\n");
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests",
	       nr_page_allocs, total_page_alloc_bytes / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests",
	       nr_page_frees, total_page_free_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
	       nr_page_allocs - nr_alloc_freed,
	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures",
	       nr_page_fails, total_page_fail_bytes / 1024);
	printf("\n");

	/* order x migrate-type histogram; '.' marks an empty bucket */
	printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable",
	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
	printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line,
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
	       graph_dotted_line, graph_dotted_line);

	for (o = 0; o < MAX_PAGE_ORDER; o++) {
		printf("%5d", o);
		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
			if (order_stats[o][m])
				printf(" %'12d", order_stats[o][m]);
			else
				printf(" %12c", '.');
		}
		printf("\n");
	}
}
953
/*
 * Print SLAB output sections that were requested on the command line
 * (--caller and/or --alloc), followed by the summary.
 */
static void print_slab_result(struct perf_session *session)
{
	if (caller_flag)
		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_slab_summary();
}
962
/*
 * Print page allocator output sections that were requested on the
 * command line (--caller and/or --alloc), followed by the summary.
 */
static void print_page_result(struct perf_session *session)
{
	if (caller_flag)
		__print_page_caller_result(session, caller_lines);
	if (alloc_flag)
		__print_page_alloc_result(session, alloc_lines);
	print_page_summary();
}
971
/* Dispatch to the per-allocator report(s) selected via --slab/--page. */
static void print_result(struct perf_session *session)
{
	if (kmem_slab)
		print_slab_result(session);
	if (kmem_page)
		print_page_result(session);
}
979
/*
 * Sort-key lists filled from --sort (or the defaults in cmd_kmem());
 * one list per output mode, consumed by sort_result().
 */
static LIST_HEAD(slab_caller_sort);
static LIST_HEAD(slab_alloc_sort);
static LIST_HEAD(page_caller_sort);
static LIST_HEAD(page_alloc_sort);
Li Zefan29b3e152009-11-24 13:26:10 +0800984
Namhyung Kim0d68bc92015-04-06 14:36:10 +0900985static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
986 struct list_head *sort_list)
Li Zefanba77c9e2009-11-20 15:53:25 +0800987{
988 struct rb_node **new = &(root->rb_node);
989 struct rb_node *parent = NULL;
Li Zefan29b3e152009-11-24 13:26:10 +0800990 struct sort_dimension *sort;
Li Zefanba77c9e2009-11-20 15:53:25 +0800991
992 while (*new) {
993 struct alloc_stat *this;
Li Zefan29b3e152009-11-24 13:26:10 +0800994 int cmp = 0;
Li Zefanba77c9e2009-11-20 15:53:25 +0800995
996 this = rb_entry(*new, struct alloc_stat, node);
997 parent = *new;
998
Li Zefan29b3e152009-11-24 13:26:10 +0800999 list_for_each_entry(sort, sort_list, list) {
1000 cmp = sort->cmp(data, this);
1001 if (cmp)
1002 break;
1003 }
Li Zefanba77c9e2009-11-20 15:53:25 +08001004
1005 if (cmp > 0)
1006 new = &((*new)->rb_left);
1007 else
1008 new = &((*new)->rb_right);
1009 }
1010
1011 rb_link_node(&data->node, parent, new);
1012 rb_insert_color(&data->node, root);
1013}
1014
Namhyung Kim0d68bc92015-04-06 14:36:10 +09001015static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
1016 struct list_head *sort_list)
Li Zefanba77c9e2009-11-20 15:53:25 +08001017{
1018 struct rb_node *node;
1019 struct alloc_stat *data;
1020
1021 for (;;) {
1022 node = rb_first(root);
1023 if (!node)
1024 break;
1025
1026 rb_erase(node, root);
1027 data = rb_entry(node, struct alloc_stat, node);
Namhyung Kim0d68bc92015-04-06 14:36:10 +09001028 sort_slab_insert(root_sorted, data, sort_list);
1029 }
1030}
1031
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001032static void sort_page_insert(struct rb_root *root, struct page_stat *data,
1033 struct list_head *sort_list)
Namhyung Kim0d68bc92015-04-06 14:36:10 +09001034{
1035 struct rb_node **new = &root->rb_node;
1036 struct rb_node *parent = NULL;
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001037 struct sort_dimension *sort;
Namhyung Kim0d68bc92015-04-06 14:36:10 +09001038
1039 while (*new) {
1040 struct page_stat *this;
1041 int cmp = 0;
1042
1043 this = rb_entry(*new, struct page_stat, node);
1044 parent = *new;
1045
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001046 list_for_each_entry(sort, sort_list, list) {
1047 cmp = sort->cmp(data, this);
1048 if (cmp)
1049 break;
1050 }
Namhyung Kim0d68bc92015-04-06 14:36:10 +09001051
1052 if (cmp > 0)
1053 new = &parent->rb_left;
1054 else
1055 new = &parent->rb_right;
1056 }
1057
1058 rb_link_node(&data->node, parent, new);
1059 rb_insert_color(&data->node, root);
1060}
1061
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001062static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
1063 struct list_head *sort_list)
Namhyung Kim0d68bc92015-04-06 14:36:10 +09001064{
1065 struct rb_node *node;
1066 struct page_stat *data;
1067
1068 for (;;) {
1069 node = rb_first(root);
1070 if (!node)
1071 break;
1072
1073 rb_erase(node, root);
1074 data = rb_entry(node, struct page_stat, node);
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001075 sort_page_insert(root_sorted, data, sort_list);
Li Zefanba77c9e2009-11-20 15:53:25 +08001076 }
1077}
1078
/*
 * Build the sorted output trees for whichever allocators were analyzed,
 * moving entries from the accumulation trees into *_sorted trees
 * ordered by the user-selected (or default) sort keys.
 */
static void sort_result(void)
{
	if (kmem_slab) {
		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
				   &slab_alloc_sort);
		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
				   &slab_caller_sort);
	}
	if (kmem_page) {
		__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
				   &page_alloc_sort);
		__sort_page_result(&page_caller_tree, &page_caller_sorted,
				   &page_caller_sort);
	}
}
1094
/*
 * Replay the recorded trace: register per-tracepoint handlers for the
 * slab and page allocator events, process all samples, then sort and
 * print the results.  Returns 0 on success, negative on error.
 */
static int __cmd_kmem(struct perf_session *session)
{
	int err = -EINVAL;
	struct perf_evsel *evsel;
	const struct perf_evsel_str_handler kmem_tracepoints[] = {
		/* slab allocator */
		{ "kmem:kmalloc", perf_evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
		{ "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
		{ "kmem:kfree", perf_evsel__process_free_event, },
		{ "kmem:kmem_cache_free", perf_evsel__process_free_event, },
		/* page allocator */
		{ "kmem:mm_page_alloc", perf_evsel__process_page_alloc_event, },
		{ "kmem:mm_page_free", perf_evsel__process_page_free_event, },
	};

	if (!perf_session__has_traces(session, "kmem record"))
		goto out;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out;
	}

	/*
	 * Newer kernels record a "pfn" field in mm_page_alloc; prefer it
	 * over the raw struct page pointer when available.
	 */
	evlist__for_each(session->evlist, evsel) {
		if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
		    perf_evsel__field(evsel, "pfn")) {
			use_pfn = true;
			break;
		}
	}

	setup_pager();
	err = perf_session__process_events(session);
	if (err != 0) {
		pr_err("error during process events: %d\n", err);
		goto out;
	}
	sort_result();
	print_result(session);
out:
	return err;
}
1139
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001140/* slab sort keys */
1141static int ptr_cmp(void *a, void *b)
Li Zefanba77c9e2009-11-20 15:53:25 +08001142{
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001143 struct alloc_stat *l = a;
1144 struct alloc_stat *r = b;
1145
Li Zefanba77c9e2009-11-20 15:53:25 +08001146 if (l->ptr < r->ptr)
1147 return -1;
1148 else if (l->ptr > r->ptr)
1149 return 1;
1150 return 0;
1151}
1152
Li Zefan29b3e152009-11-24 13:26:10 +08001153static struct sort_dimension ptr_sort_dimension = {
1154 .name = "ptr",
1155 .cmp = ptr_cmp,
1156};
1157
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001158static int slab_callsite_cmp(void *a, void *b)
Li Zefanba77c9e2009-11-20 15:53:25 +08001159{
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001160 struct alloc_stat *l = a;
1161 struct alloc_stat *r = b;
1162
Li Zefanba77c9e2009-11-20 15:53:25 +08001163 if (l->call_site < r->call_site)
1164 return -1;
1165 else if (l->call_site > r->call_site)
1166 return 1;
1167 return 0;
1168}
1169
Li Zefan29b3e152009-11-24 13:26:10 +08001170static struct sort_dimension callsite_sort_dimension = {
1171 .name = "callsite",
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001172 .cmp = slab_callsite_cmp,
Li Zefan29b3e152009-11-24 13:26:10 +08001173};
1174
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001175static int hit_cmp(void *a, void *b)
Pekka Enbergf3ced7c2009-11-22 11:58:00 +02001176{
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001177 struct alloc_stat *l = a;
1178 struct alloc_stat *r = b;
1179
Pekka Enbergf3ced7c2009-11-22 11:58:00 +02001180 if (l->hit < r->hit)
1181 return -1;
1182 else if (l->hit > r->hit)
1183 return 1;
1184 return 0;
1185}
1186
Li Zefan29b3e152009-11-24 13:26:10 +08001187static struct sort_dimension hit_sort_dimension = {
1188 .name = "hit",
1189 .cmp = hit_cmp,
1190};
1191
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001192static int bytes_cmp(void *a, void *b)
Li Zefanba77c9e2009-11-20 15:53:25 +08001193{
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001194 struct alloc_stat *l = a;
1195 struct alloc_stat *r = b;
1196
Li Zefanba77c9e2009-11-20 15:53:25 +08001197 if (l->bytes_alloc < r->bytes_alloc)
1198 return -1;
1199 else if (l->bytes_alloc > r->bytes_alloc)
1200 return 1;
1201 return 0;
1202}
1203
Li Zefan29b3e152009-11-24 13:26:10 +08001204static struct sort_dimension bytes_sort_dimension = {
1205 .name = "bytes",
1206 .cmp = bytes_cmp,
1207};
1208
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001209static int frag_cmp(void *a, void *b)
Pekka Enbergf3ced7c2009-11-22 11:58:00 +02001210{
1211 double x, y;
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001212 struct alloc_stat *l = a;
1213 struct alloc_stat *r = b;
Pekka Enbergf3ced7c2009-11-22 11:58:00 +02001214
1215 x = fragmentation(l->bytes_req, l->bytes_alloc);
1216 y = fragmentation(r->bytes_req, r->bytes_alloc);
1217
1218 if (x < y)
1219 return -1;
1220 else if (x > y)
1221 return 1;
1222 return 0;
1223}
1224
Li Zefan29b3e152009-11-24 13:26:10 +08001225static struct sort_dimension frag_sort_dimension = {
1226 .name = "frag",
1227 .cmp = frag_cmp,
1228};
1229
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001230static int pingpong_cmp(void *a, void *b)
Li Zefan079d3f62009-11-24 13:26:55 +08001231{
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001232 struct alloc_stat *l = a;
1233 struct alloc_stat *r = b;
1234
Li Zefan079d3f62009-11-24 13:26:55 +08001235 if (l->pingpong < r->pingpong)
1236 return -1;
1237 else if (l->pingpong > r->pingpong)
1238 return 1;
1239 return 0;
1240}
1241
1242static struct sort_dimension pingpong_sort_dimension = {
1243 .name = "pingpong",
1244 .cmp = pingpong_cmp,
1245};
1246
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001247/* page sort keys */
1248static int page_cmp(void *a, void *b)
1249{
1250 struct page_stat *l = a;
1251 struct page_stat *r = b;
1252
1253 if (l->page < r->page)
1254 return -1;
1255 else if (l->page > r->page)
1256 return 1;
1257 return 0;
1258}
1259
1260static struct sort_dimension page_sort_dimension = {
1261 .name = "page",
1262 .cmp = page_cmp,
1263};
1264
1265static int page_callsite_cmp(void *a, void *b)
1266{
1267 struct page_stat *l = a;
1268 struct page_stat *r = b;
1269
1270 if (l->callsite < r->callsite)
1271 return -1;
1272 else if (l->callsite > r->callsite)
1273 return 1;
1274 return 0;
1275}
1276
1277static struct sort_dimension page_callsite_sort_dimension = {
1278 .name = "callsite",
1279 .cmp = page_callsite_cmp,
1280};
1281
1282static int page_hit_cmp(void *a, void *b)
1283{
1284 struct page_stat *l = a;
1285 struct page_stat *r = b;
1286
1287 if (l->nr_alloc < r->nr_alloc)
1288 return -1;
1289 else if (l->nr_alloc > r->nr_alloc)
1290 return 1;
1291 return 0;
1292}
1293
1294static struct sort_dimension page_hit_sort_dimension = {
1295 .name = "hit",
1296 .cmp = page_hit_cmp,
1297};
1298
1299static int page_bytes_cmp(void *a, void *b)
1300{
1301 struct page_stat *l = a;
1302 struct page_stat *r = b;
1303
1304 if (l->alloc_bytes < r->alloc_bytes)
1305 return -1;
1306 else if (l->alloc_bytes > r->alloc_bytes)
1307 return 1;
1308 return 0;
1309}
1310
1311static struct sort_dimension page_bytes_sort_dimension = {
1312 .name = "bytes",
1313 .cmp = page_bytes_cmp,
1314};
1315
1316static int page_order_cmp(void *a, void *b)
1317{
1318 struct page_stat *l = a;
1319 struct page_stat *r = b;
1320
1321 if (l->order < r->order)
1322 return -1;
1323 else if (l->order > r->order)
1324 return 1;
1325 return 0;
1326}
1327
1328static struct sort_dimension page_order_sort_dimension = {
1329 .name = "order",
1330 .cmp = page_order_cmp,
1331};
1332
1333static int migrate_type_cmp(void *a, void *b)
1334{
1335 struct page_stat *l = a;
1336 struct page_stat *r = b;
1337
1338 /* for internal use to find free'd page */
1339 if (l->migrate_type == -1U)
1340 return 0;
1341
1342 if (l->migrate_type < r->migrate_type)
1343 return -1;
1344 else if (l->migrate_type > r->migrate_type)
1345 return 1;
1346 return 0;
1347}
1348
1349static struct sort_dimension migrate_type_sort_dimension = {
1350 .name = "migtype",
1351 .cmp = migrate_type_cmp,
1352};
1353
1354static int gfp_flags_cmp(void *a, void *b)
1355{
1356 struct page_stat *l = a;
1357 struct page_stat *r = b;
1358
1359 /* for internal use to find free'd page */
1360 if (l->gfp_flags == -1U)
1361 return 0;
1362
1363 if (l->gfp_flags < r->gfp_flags)
1364 return -1;
1365 else if (l->gfp_flags > r->gfp_flags)
1366 return 1;
1367 return 0;
1368}
1369
1370static struct sort_dimension gfp_flags_sort_dimension = {
1371 .name = "gfp",
1372 .cmp = gfp_flags_cmp,
1373};
1374
/* Sort keys accepted by --sort in slab mode; templates for memdup(). */
static struct sort_dimension *slab_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};
1383
/* Sort keys accepted by --sort in page mode; templates for memdup(). */
static struct sort_dimension *page_sorts[] = {
	&page_sort_dimension,
	&page_callsite_sort_dimension,
	&page_hit_sort_dimension,
	&page_bytes_sort_dimension,
	&page_order_sort_dimension,
	&migrate_type_sort_dimension,
	&gfp_flags_sort_dimension,
};
Li Zefan29b3e152009-11-24 13:26:10 +08001393
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001394static int slab_sort_dimension__add(const char *tok, struct list_head *list)
Li Zefan29b3e152009-11-24 13:26:10 +08001395{
1396 struct sort_dimension *sort;
1397 int i;
1398
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001399 for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
1400 if (!strcmp(slab_sorts[i]->name, tok)) {
1401 sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
Arnaldo Carvalho de Melo2814eb02012-09-08 22:53:06 -03001402 if (!sort) {
Arnaldo Carvalho de Melo8d9233f2013-01-24 22:24:57 -03001403 pr_err("%s: memdup failed\n", __func__);
Arnaldo Carvalho de Melo2814eb02012-09-08 22:53:06 -03001404 return -1;
1405 }
Li Zefan29b3e152009-11-24 13:26:10 +08001406 list_add_tail(&sort->list, list);
1407 return 0;
1408 }
1409 }
1410
1411 return -1;
1412}
1413
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001414static int page_sort_dimension__add(const char *tok, struct list_head *list)
1415{
1416 struct sort_dimension *sort;
1417 int i;
1418
1419 for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
1420 if (!strcmp(page_sorts[i]->name, tok)) {
1421 sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
1422 if (!sort) {
1423 pr_err("%s: memdup failed\n", __func__);
1424 return -1;
1425 }
1426 list_add_tail(&sort->list, list);
1427 return 0;
1428 }
1429 }
1430
1431 return -1;
1432}
1433
/*
 * Parse a comma-separated list of slab sort keys from @arg and append
 * each to @sort_list.  Returns 0 on success, -1 on an unknown key or
 * allocation failure.
 */
static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
{
	char *str = strdup(arg);
	char *pos = str;
	char *tok;
	int ret = 0;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while ((tok = strsep(&pos, ",")) != NULL) {
		if (slab_sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown slab --sort key: '%s'", tok);
			ret = -1;
			break;
		}
	}

	free(str);
	return ret;
}
1459
/*
 * Parse a comma-separated list of page sort keys from @arg and append
 * each to @sort_list.  Returns 0 on success, -1 on an unknown key or
 * allocation failure.
 */
static int setup_page_sorting(struct list_head *sort_list, const char *arg)
{
	char *str = strdup(arg);
	char *pos = str;
	char *tok;
	int ret = 0;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while ((tok = strsep(&pos, ",")) != NULL) {
		if (page_sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown page --sort key: '%s'", tok);
			ret = -1;
			break;
		}
	}

	free(str);
	return ret;
}
1485
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001486static int parse_sort_opt(const struct option *opt __maybe_unused,
1487 const char *arg, int unset __maybe_unused)
Li Zefanba77c9e2009-11-20 15:53:25 +08001488{
Li Zefanba77c9e2009-11-20 15:53:25 +08001489 if (!arg)
1490 return -1;
1491
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001492 if (kmem_page > kmem_slab) {
1493 if (caller_flag > alloc_flag)
1494 return setup_page_sorting(&page_caller_sort, arg);
1495 else
1496 return setup_page_sorting(&page_alloc_sort, arg);
1497 } else {
1498 if (caller_flag > alloc_flag)
1499 return setup_slab_sorting(&slab_caller_sort, arg);
1500 else
1501 return setup_slab_sorting(&slab_alloc_sort, arg);
1502 }
Li Zefanba77c9e2009-11-20 15:53:25 +08001503
1504 return 0;
1505}
1506
/*
 * --caller handler: flag callsite output; the +1 over alloc_flag records
 * that --caller came after --alloc, so later --sort/--line apply to it.
 */
static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}
Li Zefanba77c9e2009-11-20 15:53:25 +08001514
/*
 * --alloc handler: flag per-allocation output; the +1 over caller_flag
 * records that --alloc came after --caller for later --sort/--line.
 */
static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}
1522
/*
 * --slab handler: enable slab analysis; the +1 over kmem_page records
 * relative option order for parse_sort_opt().
 */
static int parse_slab_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_slab = (kmem_page + 1);
	return 0;
}
1530
/*
 * --page handler: enable page analysis; the +1 over kmem_slab records
 * relative option order for parse_sort_opt().
 */
static int parse_page_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_page = (kmem_slab + 1);
	return 0;
}
1538
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001539static int parse_line_opt(const struct option *opt __maybe_unused,
1540 const char *arg, int unset __maybe_unused)
Li Zefanba77c9e2009-11-20 15:53:25 +08001541{
1542 int lines;
1543
1544 if (!arg)
1545 return -1;
1546
1547 lines = strtoul(arg, NULL, 10);
1548
1549 if (caller_flag > alloc_flag)
1550 caller_lines = lines;
1551 else
1552 alloc_lines = lines;
1553
1554 return 0;
1555}
1556
/*
 * Build an argv for "perf record" with the kmem tracepoints of the
 * requested allocator(s) plus the user's extra arguments, then hand off
 * to cmd_record().  rec_argv ownership passes to cmd_record (it is not
 * freed here).  Returns cmd_record's result, or -ENOMEM.
 */
static int __cmd_record(int argc, const char **argv)
{
	const char * const record_args[] = {
	"record", "-a", "-R", "-c", "1",
	};
	const char * const slab_events[] = {
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
	};
	const char * const page_events[] = {
	"-e", "kmem:mm_page_alloc",
	"-e", "kmem:mm_page_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	if (kmem_slab)
		rec_argc += ARRAY_SIZE(slab_events);
	if (kmem_page)
		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */

	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	if (kmem_slab) {
		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
			rec_argv[i] = strdup(slab_events[j]);
	}
	if (kmem_page) {
		/* callchains are needed to resolve page allocation callsites */
		rec_argv[i++] = strdup("-g");

		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
			rec_argv[i] = strdup(page_events[j]);
	}

	/* append the user's remaining arguments verbatim (not duplicated) */
	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
1607
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001608int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
Li Zefanba77c9e2009-11-20 15:53:25 +08001609{
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001610 const char * const default_slab_sort = "frag,hit,bytes";
1611 const char * const default_page_sort = "bytes,hit";
Yunlong Songd1eeb772015-04-02 21:47:12 +08001612 struct perf_data_file file = {
Yunlong Songd1eeb772015-04-02 21:47:12 +08001613 .mode = PERF_DATA_MODE_READ,
1614 };
Arnaldo Carvalho de Melo0433ffb2012-10-01 15:20:58 -03001615 const struct option kmem_options[] = {
1616 OPT_STRING('i', "input", &input_name, "file", "input file name"),
Namhyung Kimbd72a332015-03-12 16:32:47 +09001617 OPT_INCR('v', "verbose", &verbose,
1618 "be more verbose (show symbol address, etc)"),
Arnaldo Carvalho de Melo0433ffb2012-10-01 15:20:58 -03001619 OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
1620 "show per-callsite statistics", parse_caller_opt),
1621 OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
1622 "show per-allocation statistics", parse_alloc_opt),
1623 OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001624 "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
1625 "page, order, migtype, gfp", parse_sort_opt),
Arnaldo Carvalho de Melo0433ffb2012-10-01 15:20:58 -03001626 OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
1627 OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
Yunlong Songd1eeb772015-04-02 21:47:12 +08001628 OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
Namhyung Kim0d68bc92015-04-06 14:36:10 +09001629 OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
1630 parse_slab_opt),
1631 OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
1632 parse_page_opt),
Arnaldo Carvalho de Melo0433ffb2012-10-01 15:20:58 -03001633 OPT_END()
1634 };
Ramkumar Ramachandra3bca2352014-03-14 23:17:51 -04001635 const char *const kmem_subcommands[] = { "record", "stat", NULL };
1636 const char *kmem_usage[] = {
1637 NULL,
Arnaldo Carvalho de Melo0433ffb2012-10-01 15:20:58 -03001638 NULL
1639 };
Namhyung Kim2b2b2c62014-08-12 15:40:38 +09001640 struct perf_session *session;
Namhyung Kim2b2b2c62014-08-12 15:40:38 +09001641 int ret = -1;
1642
Ramkumar Ramachandra3bca2352014-03-14 23:17:51 -04001643 argc = parse_options_subcommand(argc, argv, kmem_options,
1644 kmem_subcommands, kmem_usage, 0);
Li Zefanba77c9e2009-11-20 15:53:25 +08001645
Li Zefan90b86a92009-12-10 15:21:57 +08001646 if (!argc)
Li Zefanba77c9e2009-11-20 15:53:25 +08001647 usage_with_options(kmem_usage, kmem_options);
1648
Namhyung Kim0d68bc92015-04-06 14:36:10 +09001649 if (kmem_slab == 0 && kmem_page == 0)
1650 kmem_slab = 1; /* for backward compatibility */
1651
Namhyung Kim2b2b2c62014-08-12 15:40:38 +09001652 if (!strncmp(argv[0], "rec", 3)) {
Namhyung Kim0a7e6d12014-08-12 15:40:45 +09001653 symbol__init(NULL);
Namhyung Kim2b2b2c62014-08-12 15:40:38 +09001654 return __cmd_record(argc, argv);
1655 }
1656
Jiri Olsa28939e12015-04-06 14:36:08 +09001657 file.path = input_name;
1658
Namhyung Kimc9758cc2015-04-21 13:55:02 +09001659 kmem_session = session = perf_session__new(&file, false, &perf_kmem);
Namhyung Kim2b2b2c62014-08-12 15:40:38 +09001660 if (session == NULL)
Taeung Song52e028342014-09-24 10:33:37 +09001661 return -1;
Namhyung Kim2b2b2c62014-08-12 15:40:38 +09001662
Namhyung Kim0d68bc92015-04-06 14:36:10 +09001663 if (kmem_page) {
1664 struct perf_evsel *evsel = perf_evlist__first(session->evlist);
1665
1666 if (evsel == NULL || evsel->tp_format == NULL) {
1667 pr_err("invalid event found.. aborting\n");
1668 return -1;
1669 }
1670
1671 kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
Namhyung Kimc9758cc2015-04-21 13:55:02 +09001672 symbol_conf.use_callchain = true;
Namhyung Kim0d68bc92015-04-06 14:36:10 +09001673 }
1674
Namhyung Kim0a7e6d12014-08-12 15:40:45 +09001675 symbol__init(&session->header.env);
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02001676
Namhyung Kim2b2b2c62014-08-12 15:40:38 +09001677 if (!strcmp(argv[0], "stat")) {
Namhyung Kim77cfe382015-03-23 15:30:40 +09001678 setlocale(LC_ALL, "");
1679
Don Zickus4b627952014-04-07 14:55:23 -04001680 if (cpu__setup_cpunode_map())
Namhyung Kim2b2b2c62014-08-12 15:40:38 +09001681 goto out_delete;
Li Zefanba77c9e2009-11-20 15:53:25 +08001682
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001683 if (list_empty(&slab_caller_sort))
1684 setup_slab_sorting(&slab_caller_sort, default_slab_sort);
1685 if (list_empty(&slab_alloc_sort))
1686 setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
1687 if (list_empty(&page_caller_sort))
1688 setup_page_sorting(&page_caller_sort, default_page_sort);
1689 if (list_empty(&page_alloc_sort))
1690 setup_page_sorting(&page_alloc_sort, default_page_sort);
Li Zefan7d0d3942009-11-24 13:26:31 +08001691
Namhyung Kimfb4f3132015-04-21 13:55:03 +09001692 if (kmem_page) {
1693 setup_page_sorting(&page_alloc_sort_input,
1694 "page,order,migtype,gfp");
1695 setup_page_sorting(&page_caller_sort_input,
1696 "callsite,order,migtype,gfp");
1697 }
Namhyung Kim2b2b2c62014-08-12 15:40:38 +09001698 ret = __cmd_kmem(session);
Pekka Enbergb00eca82010-01-19 19:26:11 +02001699 } else
1700 usage_with_options(kmem_usage, kmem_options);
Li Zefan90b86a92009-12-10 15:21:57 +08001701
Namhyung Kim2b2b2c62014-08-12 15:40:38 +09001702out_delete:
1703 perf_session__delete(session);
1704
1705 return ret;
Li Zefanba77c9e2009-11-20 15:53:25 +08001706}
1707