blob: 5d8aeae5000452c0e45da8a92f09b1b788b6b89d [file] [log] [blame]
Li Zefanba77c9e2009-11-20 15:53:25 +08001#include "builtin.h"
2#include "perf.h"
3
4#include "util/util.h"
5#include "util/cache.h"
6#include "util/symbol.h"
7#include "util/thread.h"
8#include "util/header.h"
9
10#include "util/parse-options.h"
11#include "util/trace-event.h"
12
13#include "util/debug.h"
14#include "util/data_map.h"
15
16#include <linux/rbtree.h>
17
struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

/* perf.data file to read; overridden by -i/--input */
static char const *input_name = "perf.data";

static struct perf_header *header;
static u64 sample_type;		/* sample layout bits, cached by sample_type_check() */

/*
 * Set by --stat: the values are staggered (new = other + 1) so the
 * most recently selected table has the larger value; -s and -l use
 * that to decide which table they apply to.
 */
static int alloc_flag;
static int caller_flag;

sort_fn_t alloc_sort_fn;
sort_fn_t caller_sort_fn;

/* -1 means "no line limit" when printing the result tables */
static int alloc_lines = -1;
static int caller_lines = -1;

/* working directory info filled in by the perf.data dispatcher */
static char *cwd;
static int cwdlen;
37
/*
 * One statistics node.  The same struct serves two trees: keyed by
 * allocation pointer (root_alloc_stat) or by call site
 * (root_caller_stat).  The anonymous union reflects that only one key
 * is meaningful per tree; note ptr shares storage with name.
 */
struct alloc_stat {
	union {
		struct {
			char *name;
			u64 call_site;
		};
		u64 ptr;
	};
	u64 bytes_req;		/* accumulated bytes requested */
	u64 bytes_alloc;	/* accumulated bytes actually allocated */
	u32 hit;		/* number of events folded into this node */

	struct rb_node node;	/* linkage into one of the rb-trees */
};
52
/* accumulation trees, and their re-sorted counterparts built at the end */
static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

/* grand totals across all alloc events, for the summary */
static unsigned long total_requested, total_allocated;

/* raw tracepoint payload as carried inside a PERF_SAMPLE_RAW sample */
struct raw_event_sample {
	u32 size;
	char data[0];
};
64
65static int
66process_comm_event(event_t *event, unsigned long offset, unsigned long head)
67{
68 struct thread *thread = threads__findnew(event->comm.pid);
69
70 dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
71 (void *)(offset + head),
72 (void *)(long)(event->header.size),
73 event->comm.comm, event->comm.pid);
74
75 if (thread == NULL ||
76 thread__set_comm(thread, event->comm.comm)) {
77 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
78 return -1;
79 }
80
81 return 0;
82}
83
84static void insert_alloc_stat(unsigned long ptr,
85 int bytes_req, int bytes_alloc)
86{
87 struct rb_node **node = &root_alloc_stat.rb_node;
88 struct rb_node *parent = NULL;
89 struct alloc_stat *data = NULL;
90
91 if (!alloc_flag)
92 return;
93
94 while (*node) {
95 parent = *node;
96 data = rb_entry(*node, struct alloc_stat, node);
97
98 if (ptr > data->ptr)
99 node = &(*node)->rb_right;
100 else if (ptr < data->ptr)
101 node = &(*node)->rb_left;
102 else
103 break;
104 }
105
106 if (data && data->ptr == ptr) {
107 data->hit++;
108 data->bytes_req += bytes_req;
109 data->bytes_alloc += bytes_req;
110 } else {
111 data = malloc(sizeof(*data));
112 data->ptr = ptr;
113 data->hit = 1;
114 data->bytes_req = bytes_req;
115 data->bytes_alloc = bytes_alloc;
116
117 rb_link_node(&data->node, parent, node);
118 rb_insert_color(&data->node, &root_alloc_stat);
119 }
120}
121
122static void insert_caller_stat(unsigned long call_site,
123 int bytes_req, int bytes_alloc)
124{
125 struct rb_node **node = &root_caller_stat.rb_node;
126 struct rb_node *parent = NULL;
127 struct alloc_stat *data = NULL;
128
129 if (!caller_flag)
130 return;
131
132 while (*node) {
133 parent = *node;
134 data = rb_entry(*node, struct alloc_stat, node);
135
136 if (call_site > data->call_site)
137 node = &(*node)->rb_right;
138 else if (call_site < data->call_site)
139 node = &(*node)->rb_left;
140 else
141 break;
142 }
143
144 if (data && data->call_site == call_site) {
145 data->hit++;
146 data->bytes_req += bytes_req;
147 data->bytes_alloc += bytes_req;
148 } else {
149 data = malloc(sizeof(*data));
150 data->call_site = call_site;
151 data->hit = 1;
152 data->bytes_req = bytes_req;
153 data->bytes_alloc = bytes_alloc;
154
155 rb_link_node(&data->node, parent, node);
156 rb_insert_color(&data->node, &root_caller_stat);
157 }
158}
159
160static void process_alloc_event(struct raw_event_sample *raw,
161 struct event *event,
162 int cpu __used,
163 u64 timestamp __used,
164 struct thread *thread __used,
165 int node __used)
166{
167 unsigned long call_site;
168 unsigned long ptr;
169 int bytes_req;
170 int bytes_alloc;
171
172 ptr = raw_field_value(event, "ptr", raw->data);
173 call_site = raw_field_value(event, "call_site", raw->data);
174 bytes_req = raw_field_value(event, "bytes_req", raw->data);
175 bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data);
176
177 insert_alloc_stat(ptr, bytes_req, bytes_alloc);
178 insert_caller_stat(call_site, bytes_req, bytes_alloc);
179
180 total_requested += bytes_req;
181 total_allocated += bytes_alloc;
182}
183
/*
 * Free events are recognized but not yet accounted; this placeholder
 * exists so process_raw_event() has a target for kfree/kmem_cache_free
 * records.
 */
static void process_free_event(struct raw_event_sample *raw __used,
			       struct event *event __used,
			       int cpu __used,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
}
191
192static void
193process_raw_event(event_t *raw_event __used, void *more_data,
194 int cpu, u64 timestamp, struct thread *thread)
195{
196 struct raw_event_sample *raw = more_data;
197 struct event *event;
198 int type;
199
200 type = trace_parse_common_type(raw->data);
201 event = trace_find_event(type);
202
203 if (!strcmp(event->name, "kmalloc") ||
204 !strcmp(event->name, "kmem_cache_alloc")) {
205 process_alloc_event(raw, event, cpu, timestamp, thread, 0);
206 return;
207 }
208
209 if (!strcmp(event->name, "kmalloc_node") ||
210 !strcmp(event->name, "kmem_cache_alloc_node")) {
211 process_alloc_event(raw, event, cpu, timestamp, thread, 1);
212 return;
213 }
214
215 if (!strcmp(event->name, "kfree") ||
216 !strcmp(event->name, "kmem_cache_free")) {
217 process_free_event(raw, event, cpu, timestamp, thread);
218 return;
219 }
220}
221
/*
 * Decode one PERF_RECORD_SAMPLE.  The optional fields after the fixed
 * ip header are laid out according to sample_type, so each one must be
 * stepped over in exactly the order the kernel wrote it — do not
 * reorder these if-blocks.
 */
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	u64 ip = event->ip.ip;
	u64 timestamp = -1;	/* default when PERF_SAMPLE_TIME absent */
	u32 cpu = -1;		/* default when PERF_SAMPLE_CPU absent */
	u64 period = 1;
	void *more_data = event->ip.__more_data;
	struct thread *thread = threads__findnew(event->ip.pid);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		(long long)period);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	/* more_data now points at the raw tracepoint payload */
	process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}
268
269static int sample_type_check(u64 type)
270{
271 sample_type = type;
272
273 if (!(sample_type & PERF_SAMPLE_RAW)) {
274 fprintf(stderr,
275 "No trace sample to read. Did you call perf record "
276 "without -R?");
277 return -1;
278 }
279
280 return 0;
281}
282
/* Callbacks handed to the generic perf.data dispatcher. */
static struct perf_file_handler file_handler = {
	.process_sample_event = process_sample_event,
	.process_comm_event = process_comm_event,
	.sample_type_check = sample_type_check,
};
288
/*
 * Register our handlers and stream input_name through the dispatcher.
 * Returns whatever mmap_dispatch_perf_file() returns.
 */
static int read_events(void)
{
	register_idle_thread();
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, NULL, false, 0, 0,
				       &cwdlen, &cwd);
}
297
/*
 * Internal fragmentation as a percentage: how much of the allocated
 * space was not actually requested.  Returns 0.0 when nothing was
 * allocated, avoiding a division by zero.
 */
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	return n_alloc ? 100.0 - (100.0 * n_req / n_alloc) : 0.0;
}
305
/*
 * Dump one sorted result table, at most @n_lines rows (-1 = no limit).
 * @is_caller selects the call-site header vs. the alloc-pointer header.
 * A trailing "..." row marks output truncated by the line limit.
 */
static void __print_result(struct rb_root *root, int n_lines, int is_caller)
{
	struct rb_node *next;

	printf("\n ------------------------------------------------------------------------------\n");
	if (is_caller)
		printf(" Callsite |");
	else
		printf(" Alloc Ptr |");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Fragmentation\n");
	printf(" ------------------------------------------------------------------------------\n");

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data;

		data = rb_entry(next, struct alloc_stat, node);

		printf(" %-16p | %8llu/%-6lu | %8llu/%-6lu | %6lu | %8.3f%%\n",
			is_caller ? (void *)(unsigned long)data->call_site :
				(void *)(unsigned long)data->ptr,
			(unsigned long long)data->bytes_alloc,
			(unsigned long)data->bytes_alloc / data->hit,
			(unsigned long long)data->bytes_req,
			(unsigned long)data->bytes_req / data->hit,
			(unsigned long)data->hit,
			fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	/*
	 * n_lines underflowed to -1 exactly when the limit stopped the
	 * loop (it is also -1 when the default unlimited value met an
	 * empty tree, printing a lone "..." row in that corner case).
	 */
	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... \n");

	printf(" ------------------------------------------------------------------------------\n");
}
343
/* Print overall requested/allocated totals and global fragmentation. */
static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
}
354
/* Print whichever result tables were selected via --stat, then the summary. */
static void print_result(void)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, alloc_lines, 0);
	print_summary();
}
363
/*
 * Insert @data into @root ordered by @sort_fn.  Note the comparison is
 * deliberately inverted (cmp > 0 descends left), so an in-order walk of
 * the resulting tree yields entries in descending order of the key.
 */
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			sort_fn_t sort_fn)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;

	while (*new) {
		struct alloc_stat *this;
		int cmp;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		cmp = sort_fn(data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
388
389static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
390 sort_fn_t sort_fn)
391{
392 struct rb_node *node;
393 struct alloc_stat *data;
394
395 for (;;) {
396 node = rb_first(root);
397 if (!node)
398 break;
399
400 rb_erase(node, root);
401 data = rb_entry(node, struct alloc_stat, node);
402 sort_insert(root_sorted, data, sort_fn);
403 }
404}
405
/* Re-key both statistic trees according to the configured sort functions. */
static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, alloc_sort_fn);
	__sort_result(&root_caller_stat, &root_caller_sorted, caller_sort_fn);
}
411
/*
 * Analysis mode: read the perf.data file, sort, and print the tables.
 * Returns 0 on success or the read_events() error code.
 */
static int __cmd_kmem(void)
{
	int err;

	setup_pager();
	err = read_events();	/* fix: return value was previously ignored */
	if (err)
		return err;
	sort_result();
	print_result();

	return 0;
}
421
/* Usage string shown by parse_options()/usage_with_options(). */
static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record}",
	NULL
};
426
427
428static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
429{
430 if (l->ptr < r->ptr)
431 return -1;
432 else if (l->ptr > r->ptr)
433 return 1;
434 return 0;
435}
436
437static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
438{
439 if (l->call_site < r->call_site)
440 return -1;
441 else if (l->call_site > r->call_site)
442 return 1;
443 return 0;
444}
445
Pekka Enbergf3ced7c2009-11-22 11:58:00 +0200446static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
447{
448 if (l->hit < r->hit)
449 return -1;
450 else if (l->hit > r->hit)
451 return 1;
452 return 0;
453}
454
Li Zefanba77c9e2009-11-20 15:53:25 +0800455static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
456{
457 if (l->bytes_alloc < r->bytes_alloc)
458 return -1;
459 else if (l->bytes_alloc > r->bytes_alloc)
460 return 1;
461 return 0;
462}
463
Pekka Enbergf3ced7c2009-11-22 11:58:00 +0200464static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
465{
466 double x, y;
467
468 x = fragmentation(l->bytes_req, l->bytes_alloc);
469 y = fragmentation(r->bytes_req, r->bytes_alloc);
470
471 if (x < y)
472 return -1;
473 else if (x > y)
474 return 1;
475 return 0;
476}
477
Li Zefanba77c9e2009-11-20 15:53:25 +0800478static int parse_sort_opt(const struct option *opt __used,
479 const char *arg, int unset __used)
480{
481 sort_fn_t sort_fn;
482
483 if (!arg)
484 return -1;
485
486 if (strcmp(arg, "ptr") == 0)
487 sort_fn = ptr_cmp;
488 else if (strcmp(arg, "call_site") == 0)
489 sort_fn = callsite_cmp;
Pekka Enbergf3ced7c2009-11-22 11:58:00 +0200490 else if (strcmp(arg, "hit") == 0)
491 sort_fn = hit_cmp;
Li Zefanba77c9e2009-11-20 15:53:25 +0800492 else if (strcmp(arg, "bytes") == 0)
493 sort_fn = bytes_cmp;
Pekka Enbergf3ced7c2009-11-22 11:58:00 +0200494 else if (strcmp(arg, "frag") == 0)
495 sort_fn = frag_cmp;
Li Zefanba77c9e2009-11-20 15:53:25 +0800496 else
497 return -1;
498
499 if (caller_flag > alloc_flag)
500 caller_sort_fn = sort_fn;
501 else
502 alloc_sort_fn = sort_fn;
503
504 return 0;
505}
506
507static int parse_stat_opt(const struct option *opt __used,
508 const char *arg, int unset __used)
509{
510 if (!arg)
511 return -1;
512
513 if (strcmp(arg, "alloc") == 0)
514 alloc_flag = (caller_flag + 1);
515 else if (strcmp(arg, "caller") == 0)
516 caller_flag = (alloc_flag + 1);
517 else
518 return -1;
519 return 0;
520}
521
522static int parse_line_opt(const struct option *opt __used,
523 const char *arg, int unset __used)
524{
525 int lines;
526
527 if (!arg)
528 return -1;
529
530 lines = strtoul(arg, NULL, 10);
531
532 if (caller_flag > alloc_flag)
533 caller_lines = lines;
534 else
535 alloc_lines = lines;
536
537 return 0;
538}
539
540static const struct option kmem_options[] = {
541 OPT_STRING('i', "input", &input_name, "file",
542 "input file name"),
543 OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>",
544 "stat selector, Pass 'alloc' or 'caller'.",
545 parse_stat_opt),
546 OPT_CALLBACK('s', "sort", NULL, "key",
Pekka Enbergf3ced7c2009-11-22 11:58:00 +0200547 "sort by key: ptr, call_site, hit, bytes, frag",
Li Zefanba77c9e2009-11-20 15:53:25 +0800548 parse_sort_opt),
549 OPT_CALLBACK('l', "line", NULL, "num",
550 "show n lins",
551 parse_line_opt),
552 OPT_END()
553};
554
/*
 * Canned argv prefix for 'perf kmem record': system-wide, raw samples,
 * one sample per event, tracing every kmem alloc/free tracepoint.
 */
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};
569
570static int __cmd_record(int argc, const char **argv)
571{
572 unsigned int rec_argc, i, j;
573 const char **rec_argv;
574
575 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
576 rec_argv = calloc(rec_argc + 1, sizeof(char *));
577
578 for (i = 0; i < ARRAY_SIZE(record_args); i++)
579 rec_argv[i] = strdup(record_args[i]);
580
581 for (j = 1; j < (unsigned int)argc; j++, i++)
582 rec_argv[i] = argv[j];
583
584 return cmd_record(i, rec_argv, NULL);
585}
586
/*
 * Entry point for 'perf kmem': forwards to record mode when the first
 * non-option argument starts with "rec", otherwise runs the analysis.
 */
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	symbol__init(0);

	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(kmem_usage, kmem_options);

	/* default sort key is total bytes allocated when -s was not given */
	if (!alloc_sort_fn)
		alloc_sort_fn = bytes_cmp;
	if (!caller_sort_fn)
		caller_sort_fn = bytes_cmp;

	return __cmd_kmem();
}
605