/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS 0

/*
 * Read and write access is synchronized with spin locking. Thus, writing
 * to the buffer by the NMI handler (x86) could also occur during critical
 * sections when reading the buffer. To avoid this, there are 2
 * buffers for independent read and write access. Read access is in
 * process context only, write access only in the NMI handler. If the
 * read buffer runs empty, both buffers are swapped atomically. There
 * is potentially a small window during swapping where the buffers are
 * disabled and samples could be lost.
 *
 * Using 2 buffers adds a little overhead, but the solution is clear
 * and does not require changes in the ring buffer implementation. It
 * can be changed to a single buffer solution when the ring buffer
 * access is implemented as non-locking atomic code.
 */
struct ring_buffer *op_ring_buffer_read;
struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;
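
/*
 * Note: with DEFAULT_TIMER_EXPIRE at HZ / 10, each CPU's buffer is
 * drained into the global event buffer roughly ten times per second
 * (wq_sync_buffer() below re-arms itself with the same delay), and
 * start_cpu_work() staggers the work by one jiffy per CPU so the sync
 * jobs don't all fire at once.
 */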

void free_cpu_buffers(void)
{
	if (op_ring_buffer_read)
		ring_buffer_free(op_ring_buffer_read);
	op_ring_buffer_read = NULL;
	if (op_ring_buffer_write)
		ring_buffer_free(op_ring_buffer_write);
	op_ring_buffer_write = NULL;
}

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return fs_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = fs_cpu_buffer_size;

	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

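/*
 * add_sample() below is the only write path into the ring buffer: it
 * reserves an entry, fills in one struct op_sample, and commits it.
 * cpu_buffer_write_entry() and cpu_buffer_write_commit() are assumed
 * here to be thin wrappers (in cpu_buffer.h) around the generic
 * ring_buffer reserve/commit API; either step returns non-zero when
 * the buffer is full, and callers then account the sample as lost.
 */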
static inline int
add_sample(struct oprofile_cpu_buffer *cpu_buf,
	   unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	int ret;

	ret = cpu_buffer_write_entry(&entry);
	if (ret)
		return ret;

	entry.sample->eip = pc;
	entry.sample->event = event;

	ret = cpu_buffer_write_commit(&entry);
	if (ret)
		return ret;

	return 0;
}

static inline int
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
	return add_sample(buffer, ESCAPE_CODE, value);
}

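/*
 * Buffer format (a sketch, inferred from add_code() above and the
 * ESCAPE_CODE checks below): the buffer is a stream of (eip, event)
 * pairs. Ordinary samples carry a real PC in eip; control records use
 * ESCAPE_CODE as the eip with the code value in the event slot, e.g.
 *
 *	{ ESCAPE_CODE, is_kernel }		kernel/user transition
 *	{ ESCAPE_CODE, (unsigned long)task }	context switch
 *	{ ESCAPE_CODE, CPU_TRACE_BEGIN }	backtrace follows
 *
 * sync_buffer() decodes these while draining into the event buffer.
 */
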
/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		if (add_code(cpu_buf, is_kernel))
			goto fail;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		if (add_code(cpu_buf, (unsigned long)task))
			goto fail;
	}

	if (add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

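/*
 * Public entry points for architecture drivers. A typical PMU overflow
 * handler (hypothetical, for illustration only) would do something like:
 *
 *	static int my_pmu_handler(struct pt_regs *regs)
 *	{
 *		oprofile_add_sample(regs, MY_COUNTER_NR);
 *		return 1;
 *	}
 *
 * oprofile_add_ext_sample() is the variant for callers that already
 * know pc and is_kernel rather than deriving them from regs.
 */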
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	if (!oprofile_begin_trace(cpu_buf))
		return;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, backtrace_depth);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14

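/*
 * IBS record layout (as written below): an escape code carrying
 * ibs_code, then the raw IBS register contents as (eip, event) pairs --
 * three pairs (ibs_sample[0..5]) for every record and three more
 * (ibs_sample[6..11]) when ibs_code == IBS_OP_BEGIN, since op sampling
 * exposes a larger register set. Which MSR lands in which slot is the
 * arch driver's business and is not interpreted here;
 * MAX_IBS_SAMPLE_SIZE presumably bounds the words one record may
 * occupy in the buffer.
 */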
void oprofile_add_ibs_sample(struct pt_regs * const regs,
			     unsigned int * const ibs_sample, int ibs_code)
{
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	struct task_struct *task;
	int fail = 0;

	cpu_buf->sample_received++;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		if (add_code(cpu_buf, is_kernel))
			goto fail;
		cpu_buf->last_is_kernel = is_kernel;
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;
		if (cpu_buf->last_task != task) {
			if (add_code(cpu_buf, (unsigned long)task))
				goto fail;
			cpu_buf->last_task = task;
		}
	}

	fail = fail || add_code(cpu_buf, ibs_code);
	fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
	fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
	fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
		fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
		fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
	}

	if (fail)
		goto fail;

	if (backtrace_depth)
		oprofile_ops.backtrace(regs, backtrace_depth);

	return;

fail:
	cpu_buf->sample_lost_overflow++;
	return;
}

#endif

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, is_kernel, event);
}

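/*
 * Called once per frame by an oprofile_ops.backtrace() implementation
 * after oprofile_begin_trace() has queued a CPU_TRACE_BEGIN code; each
 * frame is stored as a plain sample with the frame's PC and event 0.
 */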
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get one
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}