#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "evlist.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

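/*
 * Insertion example (illustrative, not from the original source):
 * queueing timestamps 10, 30, 20 yields the list 10 -> 20 -> 30, while
 * oe->last is left pointing at the most recently queued event (20),
 * which is where the next insertion search starts.
 */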
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

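/*
 * With copy_on_queue set (by callers whose events live in storage that
 * may be reused before the flush, e.g. a mmap ring buffer), each queued
 * event is duplicated and charged against max_alloc_size; otherwise the
 * caller's pointer is queued as-is.
 */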
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	/* Guard against NULL so unused buffer slots can be freed safely. */
	if (event && oe->copy_on_queue) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

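/*
 * ordered_event structs are handed out from malloc'ed chunks of roughly
 * 64KB. Slot 0 of each chunk never holds an event: it is (ab)used as the
 * list node that links the chunk into oe->to_free, so alloc_event() hands
 * out slots 1..MAX_SAMPLE_BUFFER-1.
 */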
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/* First entry is abused to maintain the to_free list. */
		oe->buffer->event = NULL;
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		/*
		 * Bail out instead of dereferencing the NULL 'new' below,
		 * and give back the duplicated event.
		 */
		free_dup_event(oe, new_event);
		return NULL;
	}

	new->event = new_event;
	return new;
}

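/*
 * A sketch of the typical caller pattern (as in perf_session's event
 * queueing path; names outside this file are assumptions):
 *
 *	new = ordered_events__new(oe, sample.time, event);
 *	if (!new) {
 *		ordered_events__flush(s, tool, OE_FLUSH__HALF);
 *		new = ordered_events__new(oe, sample.time, event);
 *	}
 *
 * i.e. when the allocation limit is hit, flush the older half of the
 * queue and retry.
 */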
struct ordered_event *
ordered_events__new(struct ordered_events *oe, u64 timestamp,
		    union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
}

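/*
 * Deliver every queued event with timestamp <= oe->next_flush to the
 * tool, in timestamp order, and recycle the entries onto the cache
 * list. A limit of ULLONG_MAX means "flush everything", which is also
 * the one case where showing a progress bar makes sense.
 */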
static int __ordered_events__flush(struct perf_session *s,
				   struct perf_tool *tool)
{
	struct ordered_events *oe = &s->ordered_events;
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!tool->ordered_events || !limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session__deliver_event(s, iter->event, &sample, tool,
							  iter->file_offset);
			if (ret)
				return ret;
		}

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}

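/*
 * Flush modes (a summary of the cases below; the FINISHED_ROUND tie-in
 * is how perf_session drives this):
 *
 *	FINAL - deliver everything, used at end of input
 *	ROUND - deliver up to the previous round's max_timestamp, then
 *	        arm next_flush for the following round; driven by
 *	        PERF_RECORD_FINISHED_ROUND
 *	HALF  - deliver the older half of the queue, used when the
 *	        allocation limit is reached
 *	NONE  - keep the current limit, just account the flush type
 */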
int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
			  enum oe_flush how)
{
	struct ordered_events *oe = &s->ordered_events;
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
	};
	int err;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = __ordered_events__flush(s, tool);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

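/*
 * max_alloc_size starts at (u64)-1, i.e. effectively unlimited; callers
 * that need bounded memory may lower it before queueing events.
 */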
void ordered_events__init(struct ordered_events *oe)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
}

void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		/* The to_free entries are the chunk heads (slot 0, event == NULL). */
		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free_dup_event(oe, event->event);
		free(event);
	}
}