blob: f2c98774e6653045e7099172ec469f8455b844dc [file] [log] [blame]
Mathieu Poirier440a23b2018-01-17 10:52:11 -07001/*
2 * SPDX-License-Identifier: GPL-2.0
3 *
4 * Copyright(C) 2015-2018 Linaro Limited.
5 *
6 * Author: Tor Jeremiassen <tor@ti.com>
7 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
8 */
9
10#include <linux/bitops.h>
11#include <linux/err.h>
12#include <linux/kernel.h>
13#include <linux/log2.h>
14#include <linux/types.h>
15
16#include <stdlib.h>
17
18#include "auxtrace.h"
19#include "color.h"
20#include "cs-etm.h"
Mathieu Poirier68ffe392018-01-17 10:52:13 -070021#include "cs-etm-decoder/cs-etm-decoder.h"
Mathieu Poirier440a23b2018-01-17 10:52:11 -070022#include "debug.h"
23#include "evlist.h"
24#include "intlist.h"
25#include "machine.h"
26#include "map.h"
27#include "perf.h"
28#include "thread.h"
29#include "thread_map.h"
30#include "thread-stack.h"
31#include "util.h"
32
33#define MAX_TIMESTAMP (~0ULL)
34
/*
 * Per-session CoreSight ETM decoding state.  Embeds the generic auxtrace
 * callbacks plus the queues used to walk the recorded trace buffers.
 */
struct cs_etm_auxtrace {
	struct auxtrace auxtrace;		/* generic auxtrace hooks (process/flush/free) */
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;	/* options for synthesized events */
	struct perf_session *session;
	struct machine *machine;		/* host machine of the recorded session */
	struct thread *unknown_thread;

	u8 timeless_decoding;	/* set when no evsel carries PERF_SAMPLE_TIME */
	u8 snapshot_mode;	/* taken from CS_ETM_SNAPSHOT in the info header */
	u8 data_queued;		/* AUXTRACE events already queued from the index */
	u8 sample_branches;	/* synthesize branch samples from address ranges */

	int num_cpu;		/* number of per-CPU metadata entries */
	u32 auxtrace_type;
	u64 branches_sample_type;
	u64 branches_id;	/* sample id used for synthesized branch events */
	u64 **metadata;		/* per-CPU ETM config registers (see cs-etm.h) */
	u64 kernel_start;	/* cached machine__kernel_start() */
	unsigned int pmu_type;	/* perf_event_attr::type of the cs_etm PMU */
};
57
/* Decoding state for a single auxtrace queue (one CPU or one thread). */
struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;	/* back pointer to the session state */
	struct thread *thread;		/* resolved lazily in cs_etm__set_pid_tid_cpu() */
	struct cs_etm_decoder *decoder;	/* trace decoder instance for this queue */
	struct auxtrace_buffer *buffer;	/* buffer currently being consumed */
	const struct cs_etm_state *state;
	union perf_event *event_buf;	/* scratch space for synthesized events */
	unsigned int queue_nr;		/* index into etm->queues.queue_array */
	pid_t pid, tid;
	int cpu;
	u64 time;
	u64 timestamp;
	u64 offset;			/* running offset into the trace stream */
};
72
Mathieu Poirier9f878b22018-01-17 10:52:17 -070073static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
74static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
75 pid_t tid, u64 time_);
76
Mathieu Poirier68ffe392018-01-17 10:52:13 -070077static void cs_etm__packet_dump(const char *pkt_string)
78{
79 const char *color = PERF_COLOR_BLUE;
80 int len = strlen(pkt_string);
81
82 if (len && (pkt_string[len-1] == '\n'))
83 color_fprintf(stdout, color, " %s", pkt_string);
84 else
85 color_fprintf(stdout, color, " %s\n", pkt_string);
86
87 fflush(stdout);
88}
89
90static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
91 struct auxtrace_buffer *buffer)
92{
93 int i, ret;
94 const char *color = PERF_COLOR_BLUE;
95 struct cs_etm_decoder_params d_params;
96 struct cs_etm_trace_params *t_params;
97 struct cs_etm_decoder *decoder;
98 size_t buffer_used = 0;
99
100 fprintf(stdout, "\n");
101 color_fprintf(stdout, color,
102 ". ... CoreSight ETM Trace data: size %zu bytes\n",
103 buffer->size);
104
105 /* Use metadata to fill in trace parameters for trace decoder */
106 t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
107 for (i = 0; i < etm->num_cpu; i++) {
108 t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
109 t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
110 t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
111 t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
112 t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
113 t_params[i].etmv4.reg_configr =
114 etm->metadata[i][CS_ETMV4_TRCCONFIGR];
115 t_params[i].etmv4.reg_traceidr =
116 etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
117 }
118
119 /* Set decoder parameters to simply print the trace packets */
120 d_params.packet_printer = cs_etm__packet_dump;
121 d_params.operation = CS_ETM_OPERATION_PRINT;
122 d_params.formatted = true;
123 d_params.fsyncs = false;
124 d_params.hsyncs = false;
125 d_params.frame_aligned = true;
126
127 decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
128
129 zfree(&t_params);
130
131 if (!decoder)
132 return;
133 do {
134 size_t consumed;
135
136 ret = cs_etm_decoder__process_data_block(
137 decoder, buffer->offset,
138 &((u8 *)buffer->data)[buffer_used],
139 buffer->size - buffer_used, &consumed);
140 if (ret)
141 break;
142
143 buffer_used += consumed;
144 } while (buffer_used < buffer->size);
145
146 cs_etm_decoder__free(decoder);
147}
148
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700149static int cs_etm__flush_events(struct perf_session *session,
150 struct perf_tool *tool)
151{
Mathieu Poirier9f878b22018-01-17 10:52:17 -0700152 int ret;
153 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
154 struct cs_etm_auxtrace,
155 auxtrace);
156 if (dump_trace)
157 return 0;
158
159 if (!tool->ordered_events)
160 return -EINVAL;
161
162 if (!etm->timeless_decoding)
163 return -EINVAL;
164
165 ret = cs_etm__update_queues(etm);
166
167 if (ret < 0)
168 return ret;
169
170 return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700171}
172
173static void cs_etm__free_queue(void *priv)
174{
175 struct cs_etm_queue *etmq = priv;
176
Mathieu Poirier099c1132018-02-12 13:32:35 -0700177 if (!etmq)
178 return;
179
180 thread__zput(etmq->thread);
181 cs_etm_decoder__free(etmq->decoder);
182 zfree(&etmq->event_buf);
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700183 free(etmq);
184}
185
186static void cs_etm__free_events(struct perf_session *session)
187{
188 unsigned int i;
189 struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
190 struct cs_etm_auxtrace,
191 auxtrace);
192 struct auxtrace_queues *queues = &aux->queues;
193
194 for (i = 0; i < queues->nr_queues; i++) {
195 cs_etm__free_queue(queues->queue_array[i].priv);
196 queues->queue_array[i].priv = NULL;
197 }
198
199 auxtrace_queues__free(queues);
200}
201
202static void cs_etm__free(struct perf_session *session)
203{
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -0700204 int i;
205 struct int_node *inode, *tmp;
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700206 struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
207 struct cs_etm_auxtrace,
208 auxtrace);
209 cs_etm__free_events(session);
210 session->auxtrace = NULL;
211
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -0700212 /* First remove all traceID/CPU# nodes for the RB tree */
213 intlist__for_each_entry_safe(inode, tmp, traceid_list)
214 intlist__remove(traceid_list, inode);
215 /* Then the RB tree itself */
216 intlist__delete(traceid_list);
217
218 for (i = 0; i < aux->num_cpu; i++)
219 zfree(&aux->metadata[i]);
220
221 zfree(&aux->metadata);
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700222 zfree(&aux);
223}
224
/*
 * Memory access callback registered with the trace decoder: copy up to
 * @size bytes of program text at virtual @address into @buffer so the
 * decoder can follow the instruction stream.
 *
 * Returns the number of bytes read, or 0 when the address cannot be
 * resolved.
 *
 * NOTE(review): the return type is u32 but the early exits return -1 and
 * -EINVAL, which wrap to large positive values in the caller - confirm
 * the decoder callback contract treats those as "no data available".
 */
static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
			      size_t size, u8 *buffer)
{
	u8 cpumode;
	u64 offset;
	int len;
	struct thread *thread;
	struct machine *machine;
	struct addr_location al;

	if (!etmq)
		return -1;

	machine = etmq->etm->machine;
	/* Addresses at or above the kernel start resolve in kernel space */
	if (address >= etmq->etm->kernel_start)
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = etmq->thread;
	if (!thread) {
		/* Without a thread, only kernel addresses can be resolved */
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = etmq->etm->unknown_thread;
	}

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address, &al);

	if (!al.map || !al.map->dso)
		return 0;

	/* Don't retry a DSO whose data already failed to load for itrace */
	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
		return 0;

	/* Translate the virtual address into an offset within the DSO */
	offset = al.map->map_ip(al.map, address);

	map__load(al.map);

	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

	if (len <= 0)
		return 0;

	return len;
}
271
272static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
273 unsigned int queue_nr)
274{
275 int i;
276 struct cs_etm_decoder_params d_params;
277 struct cs_etm_trace_params *t_params;
278 struct cs_etm_queue *etmq;
279
280 etmq = zalloc(sizeof(*etmq));
281 if (!etmq)
282 return NULL;
283
284 etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
285 if (!etmq->event_buf)
286 goto out_free;
287
288 etmq->etm = etm;
289 etmq->queue_nr = queue_nr;
290 etmq->pid = -1;
291 etmq->tid = -1;
292 etmq->cpu = -1;
293
294 /* Use metadata to fill in trace parameters for trace decoder */
295 t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
296
297 if (!t_params)
298 goto out_free;
299
300 for (i = 0; i < etm->num_cpu; i++) {
301 t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
302 t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
303 t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
304 t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
305 t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
306 t_params[i].etmv4.reg_configr =
307 etm->metadata[i][CS_ETMV4_TRCCONFIGR];
308 t_params[i].etmv4.reg_traceidr =
309 etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
310 }
311
312 /* Set decoder parameters to simply print the trace packets */
313 d_params.packet_printer = cs_etm__packet_dump;
314 d_params.operation = CS_ETM_OPERATION_DECODE;
315 d_params.formatted = true;
316 d_params.fsyncs = false;
317 d_params.hsyncs = false;
318 d_params.frame_aligned = true;
319 d_params.data = etmq;
320
321 etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
322
323 zfree(&t_params);
324
325 if (!etmq->decoder)
326 goto out_free;
327
328 /*
329 * Register a function to handle all memory accesses required by
330 * the trace decoder library.
331 */
332 if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
333 0x0L, ((u64) -1L),
334 cs_etm__mem_access))
335 goto out_free_decoder;
336
337 etmq->offset = 0;
338
339 return etmq;
340
341out_free_decoder:
342 cs_etm_decoder__free(etmq->decoder);
343out_free:
344 zfree(&etmq->event_buf);
345 free(etmq);
346
347 return NULL;
348}
349
350static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
351 struct auxtrace_queue *queue,
352 unsigned int queue_nr)
353{
354 struct cs_etm_queue *etmq = queue->priv;
355
356 if (list_empty(&queue->head) || etmq)
357 return 0;
358
359 etmq = cs_etm__alloc_queue(etm, queue_nr);
360
361 if (!etmq)
362 return -ENOMEM;
363
364 queue->priv = etmq;
365
366 if (queue->cpu != -1)
367 etmq->cpu = queue->cpu;
368
369 etmq->tid = queue->tid;
370
371 return 0;
372}
373
374static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
375{
376 unsigned int i;
377 int ret;
378
379 for (i = 0; i < etm->queues.nr_queues; i++) {
380 ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
381 if (ret)
382 return ret;
383 }
384
385 return 0;
386}
387
388static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
389{
390 if (etm->queues.new_data) {
391 etm->queues.new_data = false;
392 return cs_etm__setup_queues(etm);
393 }
394
395 return 0;
396}
397
Mathieu Poirier9f878b22018-01-17 10:52:17 -0700398static int
399cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
400{
401 struct auxtrace_buffer *aux_buffer = etmq->buffer;
402 struct auxtrace_buffer *old_buffer = aux_buffer;
403 struct auxtrace_queue *queue;
404
405 queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
406
407 aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
408
409 /* If no more data, drop the previous auxtrace_buffer and return */
410 if (!aux_buffer) {
411 if (old_buffer)
412 auxtrace_buffer__drop_data(old_buffer);
413 buff->len = 0;
414 return 0;
415 }
416
417 etmq->buffer = aux_buffer;
418
419 /* If the aux_buffer doesn't have data associated, try to load it */
420 if (!aux_buffer->data) {
421 /* get the file desc associated with the perf data file */
422 int fd = perf_data__fd(etmq->etm->session->data);
423
424 aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
425 if (!aux_buffer->data)
426 return -ENOMEM;
427 }
428
429 /* If valid, drop the previous buffer */
430 if (old_buffer)
431 auxtrace_buffer__drop_data(old_buffer);
432
433 buff->offset = aux_buffer->offset;
434 buff->len = aux_buffer->size;
435 buff->buf = aux_buffer->data;
436
437 buff->ref_timestamp = aux_buffer->reference;
438
439 return buff->len;
440}
441
442static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
443 struct auxtrace_queue *queue)
444{
445 struct cs_etm_queue *etmq = queue->priv;
446
447 /* CPU-wide tracing isn't supported yet */
448 if (queue->tid == -1)
449 return;
450
451 if ((!etmq->thread) && (etmq->tid != -1))
452 etmq->thread = machine__find_thread(etm->machine, -1,
453 etmq->tid);
454
455 if (etmq->thread) {
456 etmq->pid = etmq->thread->pid_;
457 if (queue->cpu == -1)
458 etmq->cpu = etmq->thread->cpu;
459 }
460}
461
Mathieu Poirierb12235b2018-01-17 10:52:18 -0700462/*
463 * The cs etm packet encodes an instruction range between a branch target
464 * and the next taken branch. Generate sample accordingly.
465 */
466static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
467 struct cs_etm_packet *packet)
468{
469 int ret = 0;
470 struct cs_etm_auxtrace *etm = etmq->etm;
471 struct perf_sample sample = {.ip = 0,};
472 union perf_event *event = etmq->event_buf;
473 u64 start_addr = packet->start_addr;
474 u64 end_addr = packet->end_addr;
475
476 event->sample.header.type = PERF_RECORD_SAMPLE;
477 event->sample.header.misc = PERF_RECORD_MISC_USER;
478 event->sample.header.size = sizeof(struct perf_event_header);
479
480 sample.ip = start_addr;
481 sample.pid = etmq->pid;
482 sample.tid = etmq->tid;
483 sample.addr = end_addr;
484 sample.id = etmq->etm->branches_id;
485 sample.stream_id = etmq->etm->branches_id;
486 sample.period = 1;
487 sample.cpu = packet->cpu;
488 sample.flags = 0;
489 sample.cpumode = PERF_RECORD_MISC_USER;
490
491 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
492
493 if (ret)
494 pr_err(
495 "CS ETM Trace: failed to deliver instruction event, error %d\n",
496 ret);
497
498 return ret;
499}
500
/*
 * Wrapper pairing a dummy perf_tool with its session so that
 * cs_etm__event_synth() can recover the session via container_of().
 */
struct cs_etm_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};
505
506static int cs_etm__event_synth(struct perf_tool *tool,
507 union perf_event *event,
508 struct perf_sample *sample __maybe_unused,
509 struct machine *machine __maybe_unused)
510{
511 struct cs_etm_synth *cs_etm_synth =
512 container_of(tool, struct cs_etm_synth, dummy_tool);
513
514 return perf_session__deliver_synth_event(cs_etm_synth->session,
515 event, NULL);
516}
517
518static int cs_etm__synth_event(struct perf_session *session,
519 struct perf_event_attr *attr, u64 id)
520{
521 struct cs_etm_synth cs_etm_synth;
522
523 memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
524 cs_etm_synth.session = session;
525
526 return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
527 &id, cs_etm__event_synth);
528}
529
/*
 * Build the perf_event_attr for the events this decoder will synthesize
 * (currently only branch samples) and register them with the session.
 * The attribute is modelled on the first evsel recorded with the cs_etm
 * PMU; returns 0 when no such evsel exists.
 */
static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
				struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	/* Locate the evsel that carries the CoreSight trace data */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == etm->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with CoreSight Trace data\n");
		return 0;
	}

	/* Inherit the sample/exclude configuration from the traced evsel */
	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	/* Timeless traces carry no timestamps, so don't advertise them */
	if (etm->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	/* create new id val to be a fixed offset from evsel id */
	id = evsel->id[0] + 1000000000;

	if (!id)
		id = 1;

	if (etm->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		/* Remember the id so synthesized samples can reference it */
		etm->sample_branches = true;
		etm->branches_sample_type = attr.sample_type;
		etm->branches_id = id;
	}

	return 0;
}
591
592static int cs_etm__sample(struct cs_etm_queue *etmq)
593{
594 int ret;
595 struct cs_etm_packet packet;
596
597 while (1) {
598 ret = cs_etm_decoder__get_packet(etmq->decoder, &packet);
599 if (ret <= 0)
600 return ret;
601
602 /*
603 * If the packet contains an instruction range, generate an
604 * instruction sequence event.
605 */
606 if (packet.sample_type & CS_ETM_RANGE)
607 cs_etm__synth_branch_sample(etmq, &packet);
608 }
609
610 return 0;
611}
612
/*
 * Main decode loop for one queue: fetch each auxtrace buffer in turn,
 * reset the decoder (blocks in the data file need not be contiguous),
 * feed the buffer through the decoder and synthesize samples from the
 * resulting packets.  Exits via the "err <= 0" return from
 * cs_etm__get_trace() when the queue is exhausted.
 */
static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_buffer buffer;
	size_t buffer_used, processed;
	int err = 0;

	/* Cache the kernel start address once for cs_etm__mem_access() */
	if (!etm->kernel_start)
		etm->kernel_start = machine__kernel_start(etm->machine);

	/* Go through each buffer in the queue and decode them one by one */
more:
	buffer_used = 0;
	memset(&buffer, 0, sizeof(buffer));
	err = cs_etm__get_trace(&buffer, etmq);
	if (err <= 0)
		return err;
	/*
	 * We cannot assume consecutive blocks in the data file are contiguous,
	 * reset the decoder to force re-sync.
	 */
	err = cs_etm_decoder__reset(etmq->decoder);
	if (err != 0)
		return err;

	/* Run trace decoder until buffer consumed or end of trace */
	do {
		processed = 0;

		err = cs_etm_decoder__process_data_block(
			etmq->decoder,
			etmq->offset,
			&buffer.buf[buffer_used],
			buffer.len - buffer_used,
			&processed);

		if (err)
			return err;

		etmq->offset += processed;
		buffer_used += processed;

		/*
		 * Nothing to do with an error condition, let's hope the next
		 * chunk will be better.
		 */
		err = cs_etm__sample(etmq);
	} while (buffer.len > buffer_used);

goto more;

	/* NOTE(review): unreachable - the function only exits via the
	 * returns above; kept for the compiler's control-flow analysis. */
	return err;
}
666
667static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
668 pid_t tid, u64 time_)
669{
670 unsigned int i;
671 struct auxtrace_queues *queues = &etm->queues;
672
673 for (i = 0; i < queues->nr_queues; i++) {
674 struct auxtrace_queue *queue = &etm->queues.queue_array[i];
675 struct cs_etm_queue *etmq = queue->priv;
676
677 if (etmq && ((tid == -1) || (etmq->tid == tid))) {
678 etmq->time = time_;
679 cs_etm__set_pid_tid_cpu(etm, queue);
680 cs_etm__run_decoder(etmq);
681 }
682 }
683
684 return 0;
685}
686
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700687static int cs_etm__process_event(struct perf_session *session,
688 union perf_event *event,
689 struct perf_sample *sample,
690 struct perf_tool *tool)
691{
Mathieu Poirier20d9c472018-01-17 10:52:16 -0700692 int err = 0;
693 u64 timestamp;
694 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
695 struct cs_etm_auxtrace,
696 auxtrace);
697
Mathieu Poirier20d9c472018-01-17 10:52:16 -0700698 if (dump_trace)
699 return 0;
700
701 if (!tool->ordered_events) {
702 pr_err("CoreSight ETM Trace requires ordered events\n");
703 return -EINVAL;
704 }
705
706 if (!etm->timeless_decoding)
707 return -EINVAL;
708
709 if (sample->time && (sample->time != (u64) -1))
710 timestamp = sample->time;
711 else
712 timestamp = 0;
713
714 if (timestamp || etm->timeless_decoding) {
715 err = cs_etm__update_queues(etm);
716 if (err)
717 return err;
718 }
719
Mathieu Poirier9f878b22018-01-17 10:52:17 -0700720 if (event->header.type == PERF_RECORD_EXIT)
721 return cs_etm__process_timeless_queues(etm,
722 event->fork.tid,
723 sample->time);
724
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700725 return 0;
726}
727
728static int cs_etm__process_auxtrace_event(struct perf_session *session,
729 union perf_event *event,
Mathieu Poirier68ffe392018-01-17 10:52:13 -0700730 struct perf_tool *tool __maybe_unused)
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700731{
Mathieu Poirier68ffe392018-01-17 10:52:13 -0700732 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
733 struct cs_etm_auxtrace,
734 auxtrace);
735 if (!etm->data_queued) {
736 struct auxtrace_buffer *buffer;
737 off_t data_offset;
738 int fd = perf_data__fd(session->data);
739 bool is_pipe = perf_data__is_pipe(session->data);
740 int err;
741
742 if (is_pipe)
743 data_offset = 0;
744 else {
745 data_offset = lseek(fd, 0, SEEK_CUR);
746 if (data_offset == -1)
747 return -errno;
748 }
749
750 err = auxtrace_queues__add_event(&etm->queues, session,
751 event, data_offset, &buffer);
752 if (err)
753 return err;
754
755 if (dump_trace)
756 if (auxtrace_buffer__get_data(buffer, fd)) {
757 cs_etm__dump_event(etm, buffer);
758 auxtrace_buffer__put_data(buffer);
759 }
760 }
761
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700762 return 0;
763}
764
765static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
766{
767 struct perf_evsel *evsel;
768 struct perf_evlist *evlist = etm->session->evlist;
769 bool timeless_decoding = true;
770
771 /*
772 * Circle through the list of event and complain if we find one
773 * with the time bit set.
774 */
775 evlist__for_each_entry(evlist, evsel) {
776 if ((evsel->attr.sample_type & PERF_SAMPLE_TIME))
777 timeless_decoding = false;
778 }
779
780 return timeless_decoding;
781}
782
/* printf templates for the global auxtrace_info header (dump mode) */
static const char * const cs_etm_global_header_fmts[] = {
	[CS_HEADER_VERSION_0] = " Header version %llx\n",
	[CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
	[CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
};

/* printf templates for one ETMv3 per-CPU metadata block (dump mode) */
static const char * const cs_etm_priv_fmts[] = {
	[CS_ETM_MAGIC] = " Magic number %llx\n",
	[CS_ETM_CPU] = " CPU %lld\n",
	[CS_ETM_ETMCR] = " ETMCR %llx\n",
	[CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
	[CS_ETM_ETMCCER] = " ETMCCER %llx\n",
	[CS_ETM_ETMIDR] = " ETMIDR %llx\n",
};

/* printf templates for one ETMv4 per-CPU metadata block (dump mode) */
static const char * const cs_etmv4_priv_fmts[] = {
	[CS_ETM_MAGIC] = " Magic number %llx\n",
	[CS_ETM_CPU] = " CPU %lld\n",
	[CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
	[CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
	[CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
	[CS_ETMV4_TRCIDR1] = " TRCIDR1 %llx\n",
	[CS_ETMV4_TRCIDR2] = " TRCIDR2 %llx\n",
	[CS_ETMV4_TRCIDR8] = " TRCIDR8 %llx\n",
	[CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
};
809
810static void cs_etm__print_auxtrace_info(u64 *val, int num)
811{
812 int i, j, cpu = 0;
813
814 for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
815 fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
816
817 for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
818 if (val[i] == __perf_cs_etmv3_magic)
819 for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
820 fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
821 else if (val[i] == __perf_cs_etmv4_magic)
822 for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
823 fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
824 else
825 /* failure.. return */
826 return;
827 }
828}
829
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700830int cs_etm__process_auxtrace_info(union perf_event *event,
831 struct perf_session *session)
832{
833 struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
834 struct cs_etm_auxtrace *etm = NULL;
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -0700835 struct int_node *inode;
836 unsigned int pmu_type;
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700837 int event_header_size = sizeof(struct perf_event_header);
838 int info_header_size;
839 int total_size = auxtrace_info->header.size;
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -0700840 int priv_size = 0;
841 int num_cpu;
842 int err = 0, idx = -1;
843 int i, j, k;
844 u64 *ptr, *hdr = NULL;
845 u64 **metadata = NULL;
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700846
847 /*
848 * sizeof(auxtrace_info_event::type) +
849 * sizeof(auxtrace_info_event::reserved) == 8
850 */
851 info_header_size = 8;
852
853 if (total_size < (event_header_size + info_header_size))
854 return -EINVAL;
855
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -0700856 priv_size = total_size - event_header_size - info_header_size;
857
858 /* First the global part */
859 ptr = (u64 *) auxtrace_info->priv;
860
861 /* Look for version '0' of the header */
862 if (ptr[0] != 0)
863 return -EINVAL;
864
865 hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
866 if (!hdr)
867 return -ENOMEM;
868
869 /* Extract header information - see cs-etm.h for format */
870 for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
871 hdr[i] = ptr[i];
872 num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
873 pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
874 0xffffffff);
875
876 /*
877 * Create an RB tree for traceID-CPU# tuple. Since the conversion has
878 * to be made for each packet that gets decoded, optimizing access in
879 * anything other than a sequential array is worth doing.
880 */
881 traceid_list = intlist__new(NULL);
882 if (!traceid_list) {
883 err = -ENOMEM;
884 goto err_free_hdr;
885 }
886
887 metadata = zalloc(sizeof(*metadata) * num_cpu);
888 if (!metadata) {
889 err = -ENOMEM;
890 goto err_free_traceid_list;
891 }
892
893 /*
894 * The metadata is stored in the auxtrace_info section and encodes
895 * the configuration of the ARM embedded trace macrocell which is
896 * required by the trace decoder to properly decode the trace due
897 * to its highly compressed nature.
898 */
899 for (j = 0; j < num_cpu; j++) {
900 if (ptr[i] == __perf_cs_etmv3_magic) {
901 metadata[j] = zalloc(sizeof(*metadata[j]) *
902 CS_ETM_PRIV_MAX);
903 if (!metadata[j]) {
904 err = -ENOMEM;
905 goto err_free_metadata;
906 }
907 for (k = 0; k < CS_ETM_PRIV_MAX; k++)
908 metadata[j][k] = ptr[i + k];
909
910 /* The traceID is our handle */
911 idx = metadata[j][CS_ETM_ETMTRACEIDR];
912 i += CS_ETM_PRIV_MAX;
913 } else if (ptr[i] == __perf_cs_etmv4_magic) {
914 metadata[j] = zalloc(sizeof(*metadata[j]) *
915 CS_ETMV4_PRIV_MAX);
916 if (!metadata[j]) {
917 err = -ENOMEM;
918 goto err_free_metadata;
919 }
920 for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
921 metadata[j][k] = ptr[i + k];
922
923 /* The traceID is our handle */
924 idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
925 i += CS_ETMV4_PRIV_MAX;
926 }
927
928 /* Get an RB node for this CPU */
929 inode = intlist__findnew(traceid_list, idx);
930
931 /* Something went wrong, no need to continue */
932 if (!inode) {
933 err = PTR_ERR(inode);
934 goto err_free_metadata;
935 }
936
937 /*
938 * The node for that CPU should not be taken.
939 * Back out if that's the case.
940 */
941 if (inode->priv) {
942 err = -EINVAL;
943 goto err_free_metadata;
944 }
945 /* All good, associate the traceID with the CPU# */
946 inode->priv = &metadata[j][CS_ETM_CPU];
947 }
948
949 /*
950 * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
951 * CS_ETMV4_PRIV_MAX mark how many double words are in the
952 * global metadata, and each cpu's metadata respectively.
953 * The following tests if the correct number of double words was
954 * present in the auxtrace info section.
955 */
956 if (i * 8 != priv_size) {
957 err = -EINVAL;
958 goto err_free_metadata;
959 }
960
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700961 etm = zalloc(sizeof(*etm));
962
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -0700963 if (!etm) {
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700964 err = -ENOMEM;
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -0700965 goto err_free_metadata;
966 }
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700967
968 err = auxtrace_queues__init(&etm->queues);
969 if (err)
970 goto err_free_etm;
971
972 etm->session = session;
973 etm->machine = &session->machines.host;
974
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -0700975 etm->num_cpu = num_cpu;
976 etm->pmu_type = pmu_type;
977 etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
978 etm->metadata = metadata;
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700979 etm->auxtrace_type = auxtrace_info->type;
980 etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
981
982 etm->auxtrace.process_event = cs_etm__process_event;
983 etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
984 etm->auxtrace.flush_events = cs_etm__flush_events;
985 etm->auxtrace.free_events = cs_etm__free_events;
986 etm->auxtrace.free = cs_etm__free;
987 session->auxtrace = &etm->auxtrace;
988
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -0700989 if (dump_trace) {
990 cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700991 return 0;
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -0700992 }
Mathieu Poirier440a23b2018-01-17 10:52:11 -0700993
Mathieu Poirierb12235b2018-01-17 10:52:18 -0700994 if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
995 etm->synth_opts = *session->itrace_synth_opts;
996 } else {
997 itrace_synth_opts__set_default(&etm->synth_opts);
998 etm->synth_opts.callchain = false;
999 }
1000
1001 err = cs_etm__synth_events(etm, session);
1002 if (err)
1003 goto err_free_queues;
1004
Mathieu Poirier440a23b2018-01-17 10:52:11 -07001005 err = auxtrace_queues__process_index(&etm->queues, session);
1006 if (err)
1007 goto err_free_queues;
1008
1009 etm->data_queued = etm->queues.populated;
1010
1011 return 0;
1012
1013err_free_queues:
1014 auxtrace_queues__free(&etm->queues);
1015 session->auxtrace = NULL;
1016err_free_etm:
1017 zfree(&etm);
Tor Jeremiassencd8bfd82018-01-17 10:52:12 -07001018err_free_metadata:
1019 /* No need to check @metadata[j], free(NULL) is supported */
1020 for (j = 0; j < num_cpu; j++)
1021 free(metadata[j]);
1022 zfree(&metadata);
1023err_free_traceid_list:
1024 intlist__delete(traceid_list);
1025err_free_hdr:
1026 zfree(&hdr);
Mathieu Poirier440a23b2018-01-17 10:52:11 -07001027
1028 return -EINVAL;
1029}