/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer were used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

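/* One buffer descriptor per CPU; the sample arrays themselves are
 * vmalloc'ed in alloc_cpu_buffers(). */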
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

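/* Release every CPU's sample array. Safe on a partially allocated
 * set, since vfree(NULL) is a no-op. */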
void free_cpu_buffers(void)
{
	int i;

	for_each_online_cpu(i) {
		vfree(per_cpu(cpu_buffer, i).buffer);
		per_cpu(cpu_buffer, i).buffer = NULL;
	}
}

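/* Report the configured per-CPU buffer size, in samples. */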
unsigned long oprofile_get_cpu_buffer_size(void)
{
	return fs_cpu_buffer_size;
}

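/* Account one lost sample against the current CPU's buffer. */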
void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

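/* Allocate and initialise a sample array for each online CPU.
 * Returns 0 on success or -ENOMEM, freeing any partial allocation. */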
int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
		if (!b->buffer)
			goto fail;

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

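/* Arm the periodic sync work on every online CPU. */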
void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

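/* Disarm the sync work on every CPU and wait for it to drain. */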
void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
{
	/* reset these to invalid values; the next sample
	 * collected will repopulate them with proper values
	 */
	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
}

/* compute number of available slots in cpu_buffer queue */
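/*
 * One slot is always kept empty so that head == tail unambiguously
 * means "empty". Illustrative example: with buffer_size = 8, head = 6
 * and tail = 2, the writer may still claim 2 + (8 - 6) - 1 = 3 slots.
 */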
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
{
	unsigned long head = b->head_pos;
	unsigned long tail = b->tail_pos;

	if (tail > head)
		return (tail - head) - 1;

	return tail + (b->buffer_size - head) - 1;
}

static void increment_head(struct oprofile_cpu_buffer *b)
{
	unsigned long new_head = b->head_pos + 1;

	/* Ensure anything written to the slot before we
	 * increment is visible */
	wmb();

	if (new_head < b->buffer_size)
		b->head_pos = new_head;
	else
		b->head_pos = 0;
}

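/* Write one PC/event pair into the slot at head, then advance head. */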
static inline void
add_sample(struct oprofile_cpu_buffer *cpu_buf,
	   unsigned long pc, unsigned long event)
{
	struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
	entry->eip = pc;
	entry->event = event;
	increment_head(cpu_buf);
}

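/* A control record: ESCAPE_CODE in the PC slot tells sync_buffer()
 * to treat the event field as a code (kernel enter/exit, task switch,
 * trace begin) rather than as sample data. */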
static inline void
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
	add_sample(buffer, ESCAPE_CODE, value);
}

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

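	/* Worst case we need three slots: a kernel enter/exit code, a
	 * task switch code, and the sample itself. */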
	if (nr_available_slots(cpu_buf) < 3) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}

	add_sample(cpu_buf, pc, event);
	return 1;
}

static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
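	/* Room for the trace-begin code plus the sample (and any
	 * switch codes) that log_sample() will add right after. */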
	if (nr_available_slots(cpu_buf) < 4) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	if (!oprofile_begin_trace(cpu_buf))
		return;

	/* if log_sample() fails we can't backtrace since we lost the
	 * source of this event */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, backtrace_depth);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

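/* AMD Instruction-Based Sampling (IBS): each IBS event carries
 * several register words, which log_ibs_sample() stores as
 * consecutive eip/event pairs behind an escape code. */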
#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14
static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
	unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;

		if (cpu_buf->last_task != task) {
			cpu_buf->last_task = task;
			add_code(cpu_buf, (unsigned long)task);
		}
	}

	add_code(cpu_buf, ibs_code);
	add_sample(cpu_buf, ibs[0], ibs[1]);
	add_sample(cpu_buf, ibs[2], ibs[3]);
	add_sample(cpu_buf, ibs[4], ibs[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		add_sample(cpu_buf, ibs[6], ibs[7]);
		add_sample(cpu_buf, ibs[8], ibs[9]);
		add_sample(cpu_buf, ibs[10], ibs[11]);
	}

	return 1;
}

void oprofile_add_ibs_sample(struct pt_regs * const regs,
			     unsigned int * const ibs_sample, u8 code)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	struct oprofile_cpu_buffer *cpu_buf =
		&per_cpu(cpu_buffer, smp_processor_id());

	if (!backtrace_depth) {
		log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
		return;
	}

	/* if log_ibs_sample() fails we can't backtrace since we lost
	 * the source of this event */
	if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
		oprofile_ops.backtrace(regs, backtrace_depth);
}

#endif

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	if (nr_available_slots(cpu_buf) < 1) {
		cpu_buf->tracing = 0;
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/* a broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get it */
	if (pc == ESCAPE_CODE) {
		cpu_buf->tracing = 0;
		cpu_buf->backtrace_aborted++;
		return;
	}

	add_sample(cpu_buf, pc, 0);
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}