blob: 0d8cea91d2c92311f44fa83f798aad53e56be161 [file] [log] [blame]
Jiri Olsa5f86b802014-08-01 13:02:58 -03001#include <linux/list.h>
Jiri Olsacee3ab92014-07-11 14:49:54 +02002#include <linux/compiler.h>
Alexander Yarygin54bf53b2014-10-03 18:40:11 +04003#include <linux/string.h>
Jiri Olsa5f86b802014-08-01 13:02:58 -03004#include "ordered-events.h"
5#include "evlist.h"
6#include "session.h"
7#include "asm/bug.h"
8#include "debug.h"
9
Jiri Olsacee3ab92014-07-11 14:49:54 +020010#define pr_N(n, fmt, ...) \
11 eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)
12
13#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
14
/*
 * Insert @new into oe->events, keeping the list sorted by timestamp.
 *
 * The search starts from oe->last (the previously queued event) rather
 * than from either end of the list, on the assumption that consecutive
 * events arrive with nearby timestamps. Updates oe->last, oe->nr_events
 * and, when @new becomes the newest entry, oe->max_timestamp.
 */
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	/* Empty queue: @new is both the head and the newest event. */
	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		/* Walk forward until we pass @timestamp or hit the tail. */
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				/* Reached the tail: @new is the newest event. */
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		/* Insert just before the first strictly-newer event. */
		list_add_tail(&new->list, &last->list);
	} else {
		/* Walk backward until we pass @timestamp or hit the head. */
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				/* Reached the head: @new is the oldest event. */
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		/* Insert just after the last not-newer event. */
		list_add(&new->list, &last->list);
	}
}
60
Alexander Yarygin54bf53b2014-10-03 18:40:11 +040061static union perf_event *__dup_event(struct ordered_events *oe,
62 union perf_event *event)
63{
64 union perf_event *new_event = NULL;
65
66 if (oe->cur_alloc_size < oe->max_alloc_size) {
67 new_event = memdup(event, event->header.size);
68 if (new_event)
69 oe->cur_alloc_size += event->header.size;
70 }
71
72 return new_event;
73}
74
75static union perf_event *dup_event(struct ordered_events *oe,
76 union perf_event *event)
77{
78 return oe->copy_on_queue ? __dup_event(oe, event) : event;
79}
80
81static void free_dup_event(struct ordered_events *oe, union perf_event *event)
82{
83 if (oe->copy_on_queue) {
84 oe->cur_alloc_size -= event->header.size;
85 free(event);
86 }
87}
88
Jiri Olsa5f86b802014-08-01 13:02:58 -030089#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
Alexander Yarygin54bf53b2014-10-03 18:40:11 +040090static struct ordered_event *alloc_event(struct ordered_events *oe,
91 union perf_event *event)
Jiri Olsa5f86b802014-08-01 13:02:58 -030092{
93 struct list_head *cache = &oe->cache;
94 struct ordered_event *new = NULL;
Alexander Yarygin54bf53b2014-10-03 18:40:11 +040095 union perf_event *new_event;
96
97 new_event = dup_event(oe, event);
98 if (!new_event)
99 return NULL;
Jiri Olsa5f86b802014-08-01 13:02:58 -0300100
101 if (!list_empty(cache)) {
102 new = list_entry(cache->next, struct ordered_event, list);
103 list_del(&new->list);
104 } else if (oe->buffer) {
105 new = oe->buffer + oe->buffer_idx;
106 if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
107 oe->buffer = NULL;
108 } else if (oe->cur_alloc_size < oe->max_alloc_size) {
109 size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
110
111 oe->buffer = malloc(size);
Alexander Yarygin54bf53b2014-10-03 18:40:11 +0400112 if (!oe->buffer) {
113 free_dup_event(oe, new_event);
Jiri Olsa5f86b802014-08-01 13:02:58 -0300114 return NULL;
Alexander Yarygin54bf53b2014-10-03 18:40:11 +0400115 }
Jiri Olsa5f86b802014-08-01 13:02:58 -0300116
Jiri Olsacee3ab92014-07-11 14:49:54 +0200117 pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
118 oe->cur_alloc_size, size, oe->max_alloc_size);
119
Jiri Olsa5f86b802014-08-01 13:02:58 -0300120 oe->cur_alloc_size += size;
121 list_add(&oe->buffer->list, &oe->to_free);
122
123 /* First entry is abused to maintain the to_free list. */
124 oe->buffer_idx = 2;
125 new = oe->buffer + 1;
Jiri Olsacee3ab92014-07-11 14:49:54 +0200126 } else {
127 pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
Jiri Olsa5f86b802014-08-01 13:02:58 -0300128 }
129
Alexander Yarygin54bf53b2014-10-03 18:40:11 +0400130 new->event = new_event;
Jiri Olsa5f86b802014-08-01 13:02:58 -0300131 return new;
132}
133
134struct ordered_event *
Alexander Yarygin54bf53b2014-10-03 18:40:11 +0400135ordered_events__new(struct ordered_events *oe, u64 timestamp,
136 union perf_event *event)
Jiri Olsa5f86b802014-08-01 13:02:58 -0300137{
138 struct ordered_event *new;
139
Alexander Yarygin54bf53b2014-10-03 18:40:11 +0400140 new = alloc_event(oe, event);
Jiri Olsa5f86b802014-08-01 13:02:58 -0300141 if (new) {
142 new->timestamp = timestamp;
143 queue_event(oe, new);
144 }
145
146 return new;
147}
148
149void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
150{
Jiri Olsafa4e5c62014-06-15 19:46:08 +0200151 list_move(&event->list, &oe->cache);
Jiri Olsa5f86b802014-08-01 13:02:58 -0300152 oe->nr_events--;
Alexander Yarygin54bf53b2014-10-03 18:40:11 +0400153 free_dup_event(oe, event->event);
Jiri Olsa5f86b802014-08-01 13:02:58 -0300154}
155
/*
 * Deliver every queued event whose timestamp is <= oe->next_flush,
 * in timestamp order, via oe->deliver(). Delivered events are retired
 * with ordered_events__delete() and oe->last_flush tracks the newest
 * delivered timestamp. Returns 0 on success or the first non-zero
 * error from the deliver callback.
 */
static int __ordered_events__flush(struct ordered_events *oe)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	/* ULLONG_MAX means a final flush of everything — worth a progress bar. */
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	/* A zero limit means nothing can be flushed yet. */
	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		/* User asked to stop (e.g. SIGINT): abandon the flush. */
		if (session_done())
			return 0;

		/* List is sorted, so everything past the limit stays queued. */
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(oe->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = oe->deliver(oe, iter, &sample);
			if (ret)
				return ret;
		}

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	/*
	 * Re-anchor oe->last: NULL when the queue drained completely,
	 * or the queue tail if the previous last event was delivered.
	 */
	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}
203
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -0300204int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
Jiri Olsa5f86b802014-08-01 13:02:58 -0300205{
Jiri Olsacee3ab92014-07-11 14:49:54 +0200206 static const char * const str[] = {
Jiri Olsab0a45202014-06-12 09:50:11 +0200207 "NONE",
Jiri Olsacee3ab92014-07-11 14:49:54 +0200208 "FINAL",
209 "ROUND",
210 "HALF ",
211 };
Jiri Olsa5f86b802014-08-01 13:02:58 -0300212 int err;
213
Arnaldo Carvalho de Melo28083682015-02-22 13:52:47 -0800214 if (oe->nr_events == 0)
215 return 0;
216
Jiri Olsa5f86b802014-08-01 13:02:58 -0300217 switch (how) {
218 case OE_FLUSH__FINAL:
219 oe->next_flush = ULLONG_MAX;
220 break;
221
222 case OE_FLUSH__HALF:
223 {
224 struct ordered_event *first, *last;
225 struct list_head *head = &oe->events;
226
227 first = list_entry(head->next, struct ordered_event, list);
228 last = oe->last;
229
230 /* Warn if we are called before any event got allocated. */
231 if (WARN_ONCE(!last || list_empty(head), "empty queue"))
232 return 0;
233
234 oe->next_flush = first->timestamp;
235 oe->next_flush += (last->timestamp - first->timestamp) / 2;
236 break;
237 }
238
239 case OE_FLUSH__ROUND:
Jiri Olsab0a45202014-06-12 09:50:11 +0200240 case OE_FLUSH__NONE:
Jiri Olsa5f86b802014-08-01 13:02:58 -0300241 default:
242 break;
243 };
244
Jiri Olsacee3ab92014-07-11 14:49:54 +0200245 pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
246 str[how], oe->nr_events);
247 pr_oe_time(oe->max_timestamp, "max_timestamp\n");
248
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -0300249 err = __ordered_events__flush(oe);
Jiri Olsa5f86b802014-08-01 13:02:58 -0300250
251 if (!err) {
252 if (how == OE_FLUSH__ROUND)
253 oe->next_flush = oe->max_timestamp;
Jiri Olsab0a45202014-06-12 09:50:11 +0200254
255 oe->last_flush_type = how;
Jiri Olsa5f86b802014-08-01 13:02:58 -0300256 }
257
Jiri Olsacee3ab92014-07-11 14:49:54 +0200258 pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
259 str[how], oe->nr_events);
260 pr_oe_time(oe->last_flush, "last_flush\n");
261
Jiri Olsa5f86b802014-08-01 13:02:58 -0300262 return err;
263}
Jiri Olsa36522f52014-06-10 22:47:40 +0200264
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -0300265void ordered_events__init(struct ordered_events *oe, struct machines *machines,
Arnaldo Carvalho de Melod10eb1e2015-03-03 12:20:38 -0300266 struct perf_evlist *evlist, struct perf_tool *tool,
267 ordered_events__deliver_t deliver)
Jiri Olsa36522f52014-06-10 22:47:40 +0200268{
269 INIT_LIST_HEAD(&oe->events);
270 INIT_LIST_HEAD(&oe->cache);
271 INIT_LIST_HEAD(&oe->to_free);
272 oe->max_alloc_size = (u64) -1;
273 oe->cur_alloc_size = 0;
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -0300274 oe->evlist = evlist;
275 oe->machines = machines;
276 oe->tool = tool;
Arnaldo Carvalho de Melod10eb1e2015-03-03 12:20:38 -0300277 oe->deliver = deliver;
Jiri Olsa36522f52014-06-10 22:47:40 +0200278}
Jiri Olsaadc56ed2014-06-10 22:50:03 +0200279
280void ordered_events__free(struct ordered_events *oe)
281{
282 while (!list_empty(&oe->to_free)) {
283 struct ordered_event *event;
284
285 event = list_entry(oe->to_free.next, struct ordered_event, list);
286 list_del(&event->list);
Alexander Yarygin54bf53b2014-10-03 18:40:11 +0400287 free_dup_event(oe, event->event);
Jiri Olsaadc56ed2014-06-10 22:50:03 +0200288 free(event);
289 }
290}