#include "perf.h"
#include "util/debug.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-events.h"
#include "tests/tests.h"
#include "tests/hists_common.h"

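/*
 * A synthetic sample fed into the hists code: cpu/pid/ip describe the fake
 * event, while thread/map/sym are filled in once the sample has been
 * resolved against the fake machine in add_hist_entries().
 */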
struct sample {
	u32 cpu;
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
	/* perf [kernel] schedule() */
	{ .cpu = 0, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* perf [libc]   malloc() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
	/* perf [libc]   free() */
	{ .cpu = 2, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
	/* perf [perf]   main() */
	{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [kernel] page_fault() */
	{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	/* bash [bash]   main() */
	{ .cpu = 3, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
	/* bash [bash]   xmalloc() */
	{ .cpu = 0, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [kernel] page_fault() */
	{ .cpu = 1, .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};

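/*
 * Convert each fake sample into a hist entry through the normal
 * hist_entry_iter path, giving every sample a period of 100.
 */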
static int add_hist_entries(struct hists *hists, struct machine *machine)
{
	struct addr_location al;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	struct perf_sample sample = { .period = 100, };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		struct hist_entry_iter iter = {
			.evsel = evsel,
			.sample = &sample,
			.ops = &hist_iter_normal,
			.hide_unresolved = false,
		};

		sample.cpumode = PERF_RECORD_MISC_USER;
		sample.cpu = fake_samples[i].cpu;
		sample.pid = fake_samples[i].pid;
		sample.tid = fake_samples[i].pid;
		sample.ip = fake_samples[i].ip;

		if (machine__resolve(machine, &al, &sample) < 0)
			goto out;

		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
					 NULL) < 0) {
			addr_location__put(&al);
			goto out;
		}

		fake_samples[i].thread = al.thread;
		fake_samples[i].map = al.map;
		fake_samples[i].sym = al.sym;
	}

	return TEST_OK;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return TEST_FAIL;
}

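/*
 * Empty both the output tree and the input/collapsed tree and free the
 * entries, so each test case starts from a clean hists.
 */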
static void del_hist_entries(struct hists *hists)
{
	struct hist_entry *he;
	struct rb_root *root_in;
	struct rb_root *root_out;
	struct rb_node *node;

	if (hists__has(hists, need_collapse))
		root_in = &hists->entries_collapsed;
	else
		root_in = hists->entries_in;

	root_out = &hists->entries;

	while (!RB_EMPTY_ROOT(root_out)) {
		node = rb_first(root_out);

		he = rb_entry(node, struct hist_entry, rb_node);
		rb_erase(node, root_out);
		rb_erase(&he->rb_node_in, root_in);
		hist_entry__delete(he);
	}
}

typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);

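/* accessors for the sort/output keys checked by the test cases below */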
#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (he->ms.map->dso->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define PID(he)   (he->thread->tid)

/* default sort keys (no field) */
static int test1(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;

	field_order = NULL;
	sort_order = NULL; /* equivalent to sort_order = "comm,dso,sym" */

	setup_sorting(NULL);

	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object  Symbol
	 * ========  =======  =============  ==============
	 *   20.00%  perf     perf           [.] main
	 *   10.00%  bash     [kernel]       [k] page_fault
	 *   10.00%  bash     bash           [.] main
	 *   10.00%  bash     bash           [.] xmalloc
	 *   10.00%  perf     [kernel]       [k] page_fault
	 *   10.00%  perf     [kernel]       [k] schedule
	 *   10.00%  perf     libc           [.] free
	 *   10.00%  perf     libc           [.] malloc
	 *   10.00%  perf     perf           [.] cmd_record
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "main") && he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "xmalloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "schedule") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "free") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "malloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "cmd_record") && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* mixed fields and sort keys */
static int test2(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;

	field_order = "overhead,cpu";
	sort_order = "pid";

	setup_sorting(NULL);

	/*
	 * expected output:
	 *
	 * Overhead  CPU  Command:  Pid
	 * ========  ===  =============
	 *   30.00%    1  perf :  100
	 *   10.00%    0  perf :  100
	 *   10.00%    2  perf :  100
	 *   20.00%    2  perf :  200
	 *   10.00%    0  bash :  300
	 *   10.00%    1  bash :  300
	 *   10.00%    3  bash :  300
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 0 && PID(he) == 100 && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* fields only (no sort key) */
static int test3(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;

	field_order = "comm,overhead,dso";
	sort_order = NULL;

	setup_sorting(NULL);

	/*
	 * expected output:
	 *
	 * Command  Overhead  Shared Object
	 * =======  ========  =============
	 *    bash    20.00%  bash
	 *    bash    10.00%  [kernel]
	 *    perf    30.00%  perf
	 *    perf    20.00%  [kernel]
	 *    perf    20.00%  libc
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
			he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			he->stat.period == 300);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			he->stat.period == 200);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* handle duplicate 'dso' field */
static int test4(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;

	field_order = "dso,sym,comm,overhead,dso";
	sort_order = "sym";

	setup_sorting(NULL);

	/*
	 * expected output:
	 *
	 * Shared Object  Symbol          Command  Overhead
	 * =============  ==============  =======  ========
	 *          perf  [.] cmd_record     perf    10.00%
	 *          libc  [.] free           perf    10.00%
	 *          bash  [.] main           bash    10.00%
	 *          perf  [.] main           perf    20.00%
	 *          libc  [.] malloc         perf    10.00%
	 *      [kernel]  [k] page_fault     bash    10.00%
	 *      [kernel]  [k] page_fault     perf    10.00%
	 *      [kernel]  [k] schedule       perf    10.00%
	 *          bash  [.] xmalloc        bash    10.00%
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "libc") && !strcmp(SYM(he), "free") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "bash") && !strcmp(SYM(he), "main") &&
			!strcmp(COMM(he), "bash") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "main") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "libc") && !strcmp(SYM(he), "malloc") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
			!strcmp(COMM(he), "bash") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "schedule") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "bash") && !strcmp(SYM(he), "xmalloc") &&
			!strcmp(COMM(he), "bash") && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* full sort keys w/o overhead field */
static int test5(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;

	field_order = "cpu,pid,comm,dso,sym";
	sort_order = "dso,pid";

	setup_sorting(NULL);

	/*
	 * expected output:
	 *
	 * CPU  Command:  Pid  Command  Shared Object  Symbol
	 * ===  =============  =======  =============  ==============
	 *   0  perf:  100     perf     [kernel]       [k] schedule
	 *   2  perf:  200     perf     [kernel]       [k] page_fault
	 *   1  bash:  300     bash     [kernel]       [k] page_fault
	 *   0  bash:  300     bash     bash           [.] xmalloc
	 *   3  bash:  300     bash     bash           [.] main
	 *   1  perf:  100     perf     libc           [.] malloc
	 *   2  perf:  100     perf     libc           [.] free
	 *   1  perf:  100     perf     perf           [.] cmd_record
	 *   1  perf:  100     perf     perf           [.] main
	 *   2  perf:  200     perf     perf           [.] main
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first(root);
	he = rb_entry(node, struct hist_entry, rb_node);

	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 0 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "schedule") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 2 && PID(he) == 200 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 300 &&
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 0 && PID(he) == 300 &&
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "xmalloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 3 && PID(he) == 300 &&
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "malloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 2 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "free") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "cmd_record") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 2 && PID(he) == 200 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

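/*
 * Entry point: build a fake machine with known threads/maps/symbols, add a
 * cpu-clock event and run each output-sorting test case against it.
 */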
int test__hists_output(int subtest __maybe_unused)
{
	int err = TEST_FAIL;
	struct machines machines;
	struct machine *machine;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new();
	size_t i;
	test_fn_t testcases[] = {
		test1,
		test2,
		test3,
		test4,
		test5,
	};

	TEST_ASSERT_VAL("No memory", evlist);

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = TEST_FAIL;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	evsel = perf_evlist__first(evlist);

	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
		err = testcases[i](evsel, machine);
		if (err < 0)
			break;
	}

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	machines__exit(&machines);

	return err;
}