#include <sys/time.h>
#include <sys/prctl.h>
#include <time.h>
#include <stdlib.h>
#include <errno.h>

#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "tests.h"

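/*
 * Busy-spin for roughly 50ms, then sleep for 50ms, so that the test
 * workload generates both on-CPU samples and context switches.
 */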
static int spin_sleep(void)
{
	struct timeval start, now, diff, maxtime;
	struct timespec ts;
	int err, i;

	maxtime.tv_sec = 0;
	maxtime.tv_usec = 50000;

	err = gettimeofday(&start, NULL);
	if (err)
		return err;

	/* Spin for 50ms */
	while (1) {
		for (i = 0; i < 1000; i++)
			barrier();

		err = gettimeofday(&now, NULL);
		if (err)
			return err;

		timersub(&now, &start, &diff);
		if (timercmp(&diff, &maxtime, > /* For checkpatch */))
			break;
	}

	ts.tv_nsec = 50 * 1000 * 1000;
	ts.tv_sec = 0;

	/* Sleep for 50ms; an interrupted sleep is not a failure */
	err = nanosleep(&ts, NULL);
	if (err == -1 && errno == EINTR)
		err = 0;

	return err;
}

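/*
 * State accumulated while processing the recorded events: the evsels to
 * match samples against, the last tid seen switching in on each cpu, and
 * flags recording which comm events and cycles samples have been seen.
 */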
struct switch_tracking {
	struct perf_evsel *switch_evsel;
	struct perf_evsel *cycles_evsel;
	pid_t *tids;
	int nr_tids;
	int comm_seen[4];
	int cycles_before_comm_1;
	int cycles_between_comm_2_and_comm_3;
	int cycles_after_comm_4;
};

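/*
 * Return 1 if the event is the expected PERF_RECORD_COMM for this process
 * with the given comm string, 0 if it is some other event, and -1 if the
 * same comm has already been seen (a duplicate).
 */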
static int check_comm(struct switch_tracking *switch_tracking,
		      union perf_event *event, const char *comm, int nr)
{
	if (event->header.type == PERF_RECORD_COMM &&
	    (pid_t)event->comm.pid == getpid() &&
	    (pid_t)event->comm.tid == getpid() &&
	    strcmp(event->comm.comm, comm) == 0) {
		if (switch_tracking->comm_seen[nr]) {
			pr_debug("Duplicate comm event\n");
			return -1;
		}
		switch_tracking->comm_seen[nr] = 1;
		pr_debug3("comm event: %s nr: %d\n", event->comm.comm, nr);
		return 1;
	}
	return 0;
}

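/*
 * Grow the per-cpu tid array on demand so that tids[cpu] is valid for the
 * highest cpu number seen so far.  New entries are initialized to -1,
 * meaning "no switch seen on this cpu yet".
 */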
static int check_cpu(struct switch_tracking *switch_tracking, int cpu)
{
	int i, nr = cpu + 1;

	if (cpu < 0)
		return -1;

	if (!switch_tracking->tids) {
		switch_tracking->tids = calloc(nr, sizeof(pid_t));
		if (!switch_tracking->tids)
			return -1;
		for (i = 0; i < nr; i++)
			switch_tracking->tids[i] = -1;
		switch_tracking->nr_tids = nr;
		return 0;
	}

	if (cpu >= switch_tracking->nr_tids) {
		void *addr;

		addr = realloc(switch_tracking->tids, nr * sizeof(pid_t));
		if (!addr)
			return -1;
		switch_tracking->tids = addr;
		for (i = switch_tracking->nr_tids; i < nr; i++)
			switch_tracking->tids[i] = -1;
		switch_tracking->nr_tids = nr;
		return 0;
	}

	return 0;
}

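/*
 * For sched_switch samples, verify on each cpu that the previous tid of the
 * current switch matches the next tid of the last switch seen on that cpu,
 * i.e. that no sched_switch events were lost.  For cycles samples, record in
 * which phase of the workload they occurred relative to the comm events.
 */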
static int process_sample_event(struct perf_evlist *evlist,
				union perf_event *event,
				struct switch_tracking *switch_tracking)
{
	struct perf_sample sample;
	struct perf_evsel *evsel;
	pid_t next_tid, prev_tid;
	int cpu, err;

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	evsel = perf_evlist__id2evsel(evlist, sample.id);
	if (evsel == switch_tracking->switch_evsel) {
		next_tid = perf_evsel__intval(evsel, &sample, "next_pid");
		prev_tid = perf_evsel__intval(evsel, &sample, "prev_pid");
		cpu = sample.cpu;
		pr_debug3("sched_switch: cpu: %d prev_tid %d next_tid %d\n",
			  cpu, prev_tid, next_tid);
		err = check_cpu(switch_tracking, cpu);
		if (err)
			return err;
		/*
		 * Check for no missing sched_switch events i.e. that the
		 * evsel->system_wide flag has worked.
		 */
		if (switch_tracking->tids[cpu] != -1 &&
		    switch_tracking->tids[cpu] != prev_tid) {
			pr_debug("Missing sched_switch events\n");
			return -1;
		}
		switch_tracking->tids[cpu] = next_tid;
	}

	if (evsel == switch_tracking->cycles_evsel) {
		pr_debug3("cycles event\n");
		if (!switch_tracking->comm_seen[0])
			switch_tracking->cycles_before_comm_1 = 1;
		if (switch_tracking->comm_seen[1] &&
		    !switch_tracking->comm_seen[2])
			switch_tracking->cycles_between_comm_2_and_comm_3 = 1;
		if (switch_tracking->comm_seen[3])
			switch_tracking->cycles_after_comm_4 = 1;
	}

	return 0;
}

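/*
 * Dispatch samples to process_sample_event() and check that each comm event
 * matches exactly one of the four expected comm strings.
 */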
static int process_event(struct perf_evlist *evlist, union perf_event *event,
			 struct switch_tracking *switch_tracking)
{
	if (event->header.type == PERF_RECORD_SAMPLE)
		return process_sample_event(evlist, event, switch_tracking);

	if (event->header.type == PERF_RECORD_COMM) {
		int err, done = 0;

		err = check_comm(switch_tracking, event, "Test COMM 1", 0);
		if (err < 0)
			return -1;
		done += err;
		err = check_comm(switch_tracking, event, "Test COMM 2", 1);
		if (err < 0)
			return -1;
		done += err;
		err = check_comm(switch_tracking, event, "Test COMM 3", 2);
		if (err < 0)
			return -1;
		done += err;
		err = check_comm(switch_tracking, event, "Test COMM 4", 3);
		if (err < 0)
			return -1;
		done += err;
		if (done != 1) {
			pr_debug("Unexpected comm event\n");
			return -1;
		}
	}

	return 0;
}

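/*
 * Events read from the mmap buffers are queued on a list so that they can be
 * sorted by their sample time before being processed.
 */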
struct event_node {
	struct list_head list;
	union perf_event *event;
	u64 event_time;
};

static int add_event(struct perf_evlist *evlist, struct list_head *events,
		     union perf_event *event)
{
	struct perf_sample sample;
	struct event_node *node;

	node = malloc(sizeof(struct event_node));
	if (!node) {
		pr_debug("malloc failed\n");
		return -1;
	}
	node->event = event;
	list_add(&node->list, events);

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	if (!sample.time) {
		pr_debug("event with no time\n");
		return -1;
	}

	node->event_time = sample.time;

	return 0;
}

static void free_event_nodes(struct list_head *events)
{
	struct event_node *node;

	while (!list_empty(events)) {
		node = list_entry(events->next, struct event_node, list);
		list_del(&node->list);
		free(node);
	}
}

static int compar(const void *a, const void *b)
{
	const struct event_node *nodea = a;
	const struct event_node *nodeb = b;
	s64 cmp = nodea->event_time - nodeb->event_time;

	/* Avoid truncating the s64 difference when narrowing to int */
	return cmp < 0 ? -1 : cmp > 0;
}

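/*
 * Drain all mmap buffers, copy the queued events into an array, sort them by
 * time, and then check each one in order.
 */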
static int process_events(struct perf_evlist *evlist,
			  struct switch_tracking *switch_tracking)
{
	union perf_event *event;
	unsigned pos, cnt = 0;
	LIST_HEAD(events);
	struct event_node *events_array, *node;
	int i, ret;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			cnt += 1;
			ret = add_event(evlist, &events, event);
			perf_evlist__mmap_consume(evlist, i);
			if (ret < 0)
				goto out_free_nodes;
		}
	}

	events_array = calloc(cnt, sizeof(struct event_node));
	if (!events_array) {
		pr_debug("calloc failed\n");
		ret = -1;
		goto out_free_nodes;
	}

	pos = 0;
	list_for_each_entry(node, &events, list)
		events_array[pos++] = *node;

	qsort(events_array, cnt, sizeof(struct event_node), compar);

	for (pos = 0; pos < cnt; pos++) {
		ret = process_event(evlist, events_array[pos].event,
				    switch_tracking);
		if (ret < 0)
			goto out_free;
	}

	ret = 0;
out_free:
	pr_debug("%u events recorded\n", cnt);
	free(events_array);
out_free_nodes:
	free_event_nodes(&events);
	return ret;
}

/**
 * test__switch_tracking - test using sched_switch and tracking events.
 *
 * This function implements a test that checks that sched_switch events and
 * tracking events can be recorded for a workload (current process) using the
 * evsel->system_wide and evsel->tracking flags (respectively) with other
 * events sometimes enabled or disabled.
 */
int test__switch_tracking(void)
{
	const char *sched_switch = "sched:sched_switch";
	struct switch_tracking switch_tracking = { .tids = NULL, };
	struct record_opts opts = {
		.mmap_pages    = UINT_MAX,
		.user_freq     = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.freq          = 4000,
		.target        = {
			.uses_mmap = true,
		},
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel, *cpu_clocks_evsel, *cycles_evsel;
	struct perf_evsel *switch_evsel, *tracking_evsel;
	const char *comm;
	int err = -1;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (!threads) {
		pr_debug("thread_map__new failed!\n");
		goto out_err;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed!\n");
		goto out_err;
	}

	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("perf_evlist__new failed!\n");
		goto out_err;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

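	/*
	 * The test uses four events: a cpu-clock event that is disabled
	 * before the workload starts, a cycles event that is disabled and
	 * re-enabled part-way through, a sched_switch tracepoint opened
	 * system-wide, and a dummy event that carries the comm/mmap
	 * tracking records.
	 */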
	/* First event */
	err = parse_events(evlist, "cpu-clock:u", NULL);
	if (err) {
		pr_debug("Failed to parse event cpu-clock:u\n");
		goto out_err;
	}

	cpu_clocks_evsel = perf_evlist__last(evlist);

	/* Second event */
	err = parse_events(evlist, "cycles:u", NULL);
	if (err) {
		pr_debug("Failed to parse event cycles:u\n");
		goto out_err;
	}

	cycles_evsel = perf_evlist__last(evlist);

	/* Third event */
	if (!perf_evlist__can_select_event(evlist, sched_switch)) {
		fprintf(stderr, " (no sched_switch)");
		err = 0;
		goto out;
	}

	err = parse_events(evlist, sched_switch, NULL);
	if (err) {
		pr_debug("Failed to parse event %s\n", sched_switch);
		goto out_err;
	}

	switch_evsel = perf_evlist__last(evlist);

	perf_evsel__set_sample_bit(switch_evsel, CPU);
	perf_evsel__set_sample_bit(switch_evsel, TIME);

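	/*
	 * The sched_switch event is opened system-wide so that switches on
	 * all cpus are captured, skips the extra sample types used for AUX
	 * area tracing, and is enabled immediately when opened.
	 */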
	switch_evsel->system_wide = true;
	switch_evsel->no_aux_samples = true;
	switch_evsel->immediate = true;

	/* Test moving an event to the front */
	if (cycles_evsel == perf_evlist__first(evlist)) {
		pr_debug("cycles event already at front");
		goto out_err;
	}
	perf_evlist__to_front(evlist, cycles_evsel);
	if (cycles_evsel != perf_evlist__first(evlist)) {
		pr_debug("Failed to move cycles event to front");
		goto out_err;
	}

	perf_evsel__set_sample_bit(cycles_evsel, CPU);
	perf_evsel__set_sample_bit(cycles_evsel, TIME);

	/* Fourth event */
	err = parse_events(evlist, "dummy:u", NULL);
	if (err) {
		pr_debug("Failed to parse event dummy:u\n");
		goto out_err;
	}

	tracking_evsel = perf_evlist__last(evlist);

	perf_evlist__set_tracking_event(evlist, tracking_evsel);

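	/*
	 * The dummy event only carries the side-band records (comm, mmap,
	 * etc.), so sample every event (period 1) rather than at a
	 * frequency, and include a timestamp so events can be time-ordered.
	 */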
	tracking_evsel->attr.freq = 0;
	tracking_evsel->attr.sample_period = 1;

	perf_evsel__set_sample_bit(tracking_evsel, TIME);

	/* Config events */
	perf_evlist__config(evlist, &opts);

	/* Check moved event is still at the front */
	if (cycles_evsel != perf_evlist__first(evlist)) {
		pr_debug("Front event no longer at front");
		goto out_err;
	}

	/* Check tracking event is tracking */
	if (!tracking_evsel->attr.mmap || !tracking_evsel->attr.comm) {
		pr_debug("Tracking event not tracking\n");
		goto out_err;
	}

	/* Check non-tracking events are not tracking */
	evlist__for_each(evlist, evsel) {
		if (evsel != tracking_evsel) {
			if (evsel->attr.mmap || evsel->attr.comm) {
				pr_debug("Non-tracking event is tracking\n");
				goto out_err;
			}
		}
	}

	if (perf_evlist__open(evlist) < 0) {
		fprintf(stderr, " (not supported)");
		err = 0;
		goto out;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err) {
		pr_debug("perf_evlist__mmap failed!\n");
		goto out_err;
	}

	perf_evlist__enable(evlist);

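	/*
	 * The workload below alternates spinning/sleeping with renaming the
	 * process via prctl(PR_SET_NAME), while the cpu-clock and cycles
	 * events are selectively disabled and re-enabled.  The comm events
	 * then act as markers for when cycles samples should and should not
	 * appear.
	 */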
	err = perf_evlist__disable_event(evlist, cpu_clocks_evsel);
	if (err) {
		pr_debug("perf_evlist__disable_event failed!\n");
		goto out_err;
	}

	err = spin_sleep();
	if (err) {
		pr_debug("spin_sleep failed!\n");
		goto out_err;
	}

	comm = "Test COMM 1";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = perf_evlist__disable_event(evlist, cycles_evsel);
	if (err) {
		pr_debug("perf_evlist__disable_event failed!\n");
		goto out_err;
	}

	comm = "Test COMM 2";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = spin_sleep();
	if (err) {
		pr_debug("spin_sleep failed!\n");
		goto out_err;
	}

	comm = "Test COMM 3";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = perf_evlist__enable_event(evlist, cycles_evsel);
	if (err) {
		pr_debug("perf_evlist__enable_event failed!\n");
		goto out_err;
	}

	comm = "Test COMM 4";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = spin_sleep();
	if (err) {
		pr_debug("spin_sleep failed!\n");
		goto out_err;
	}

	perf_evlist__disable(evlist);

	switch_tracking.switch_evsel = switch_evsel;
	switch_tracking.cycles_evsel = cycles_evsel;

	err = process_events(evlist, &switch_tracking);

	zfree(&switch_tracking.tids);

	if (err)
		goto out_err;

	/* Check all 4 comm events were seen i.e. that evsel->tracking works */
	if (!switch_tracking.comm_seen[0] || !switch_tracking.comm_seen[1] ||
	    !switch_tracking.comm_seen[2] || !switch_tracking.comm_seen[3]) {
		pr_debug("Missing comm events\n");
		goto out_err;
	}

	/* Check cycles event got enabled */
	if (!switch_tracking.cycles_before_comm_1) {
		pr_debug("Missing cycles events\n");
		goto out_err;
	}

	/* Check cycles event got disabled */
	if (switch_tracking.cycles_between_comm_2_and_comm_3) {
		pr_debug("cycles events seen even though the event was disabled\n");
		goto out_err;
	}

	/* Check cycles event got enabled again */
	if (!switch_tracking.cycles_after_comm_4) {
		pr_debug("Missing cycles events\n");
		goto out_err;
	}
out:
	if (evlist) {
		perf_evlist__disable(evlist);
		perf_evlist__delete(evlist);
	} else {
		cpu_map__delete(cpus);
		thread_map__delete(threads);
	}

	return err;

out_err:
	err = -1;
	goto out;
}