#include "perf.h"
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "thread.h"
#include "parse-events.h"
#include "hists_common.h"
#include <linux/kernel.h>

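/*
 * This test builds two evsels' hists from the fake samples below, then
 * exercises hists__match() and hists__link(): common entries should get
 * paired, and unmatched entries from the other hists should show up as
 * dummy pairs in the leader.  Each sample records the thread/map/symbol
 * it resolves to, so the validation helpers can look entries up again.
 */
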
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf] main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf] cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash] xmalloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc] malloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, },
};

static struct sample fake_samples[][5] = {
	{
		/* perf [perf] run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc] malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc] free() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc] free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc] malloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash] xfree() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc] realloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

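/*
 * Fill each evsel's hists with the common samples plus that evsel's own
 * set of fake_samples, remembering the resolved thread/map/symbol of
 * every sample so the validation helpers can look them up afterwards.
 */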
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, .weight = 1, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples - 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so in total 9 entries will be in its tree.
	 */
	evlist__for_each_entry(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			sample.cpumode = PERF_RECORD_MISC_USER;
			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;

			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
					      NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
					      NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

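/* Return 1 if the given thread/map/symbol triple is in the sample array. */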
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

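/*
 * After hists__match(), exactly the entries that came from
 * fake_common_samples should have been paired.
 */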
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

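/* The match result must hold for both the leader and the other hists. */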
static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

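/*
 * After hists__link(), count the total, paired and dummy entries.  A
 * dummy entry is a paired entry whose thread/map/symbol comes from
 * neither fake_common_samples nor this evsel's own fake_samples.
 */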
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from the other
	 * hists, and some of its entries will have no pair.  However,
	 * every entry in the other hists should have a (dummy) pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

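/* Check the leader (idx = 0) and the other (idx = 1) hists after linking. */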
static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

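/*
 * The test proper: create two evsels, fill their hists from the fake
 * machine, then verify hists__match() and hists__link() between the
 * first and the last evsel's hists.
 */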
int test__hists_link(int subtest __maybe_unused)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock", NULL);
	if (err)
		goto out;

	err = TEST_FAIL;
	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting(NULL) < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

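	/* collapse each evsel's hists before matching and linking them */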
	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}