/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
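
/*
 * A rough sketch of the flow (nothing here is new API): interrupt-level
 * arch code produces entries at head_pos via add_sample()/add_code(),
 * while wq_sync_buffer() periodically consumes from tail_pos through
 * sync_buffer().  One slot is always left unused, so head_pos == tail_pos
 * unambiguously means "empty"; see nr_available_slots() below.
 */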

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

void free_cpu_buffers(void)
{
	int i;

	for_each_online_cpu(i)
		vfree(per_cpu(cpu_buffer, i).buffer);
}

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
		if (!b->buffer)
			goto fail;

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

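/*
 * Partial-allocation cleanup: free_cpu_buffers() walks every online CPU
 * and vfree(NULL) is a no-op, so per-CPU buffers that were never
 * allocated are skipped safely.
 */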
fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

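	/*
	 * cancel_delayed_work() only removes work that is still pending;
	 * an instance that is already running is unaffected, so wait for
	 * any in-flight handlers before returning.
	 */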
	flush_scheduled_work();
}

/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
{
	/*
	 * Reset these to invalid values; the next sample collected will
	 * populate the buffer with proper values to initialize it.
	 */
	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
}

/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
{
	unsigned long head = b->head_pos;
	unsigned long tail = b->tail_pos;

	if (tail > head)
		return (tail - head) - 1;

	return tail + (b->buffer_size - head) - 1;
}
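
/*
 * Worked example of the arithmetic above: with buffer_size = 8,
 * head_pos = 5 and tail_pos = 2, the reader trails the writer, so
 * tail + (size - head) - 1 = 2 + 3 - 1 = 4 slots are free.  The -1
 * keeps one slot permanently unused so that head == tail always means
 * "empty" rather than "full".
 */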

static void increment_head(struct oprofile_cpu_buffer *b)
{
	unsigned long new_head = b->head_pos + 1;

	/*
	 * Ensure anything written to the slot before we increment is
	 * visible.
	 */
	wmb();

	if (new_head < b->buffer_size)
		b->head_pos = new_head;
	else
		b->head_pos = 0;
}
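
/*
 * The wmb() above publishes a slot only after its contents are written;
 * the consumer side (sync_buffer()) is expected to order its reads
 * correspondingly, so it never sees a claimed slot with stale eip/event
 * fields.
 */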

static inline void
add_sample(struct oprofile_cpu_buffer *cpu_buf,
	   unsigned long pc, unsigned long event)
{
	struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
	entry->eip = pc;
	entry->event = event;
	increment_head(cpu_buf);
}

static inline void
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
	add_sample(buffer, ESCAPE_CODE, value);
}
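
/*
 * On-stream format, as the reader in sync_buffer() sees it: a real PC
 * never equals ESCAPE_CODE (log_sample() discards such samples), so an
 * (ESCAPE_CODE, value) pair marks out-of-band state.  For example, a
 * user-to-kernel transition followed by two hits appears as:
 *
 *	{ ESCAPE_CODE, 1 }			kernel enter
 *	{ ESCAPE_CODE, (unsigned long)task }	task switch
 *	{ pc1, event }
 *	{ pc2, event }
 */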

/*
 * This must be safe from any context.  It's safe writing here because
 * of the head/tail separation of the writer and reader of the CPU
 * buffer.
 *
 * is_kernel is needed because on some architectures you cannot tell if
 * you are in kernel or user space simply by looking at pc.  We tag this
 * in the buffer by generating kernel enter/exit events whenever
 * is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

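	/*
	 * Worst case below is three slots: a kernel/user transition
	 * code, a task switch code and the sample itself.
	 */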
	if (nr_available_slots(cpu_buf) < 3) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}

	add_sample(cpu_buf, pc, event);
	return 1;
}

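/*
 * Four slots are reserved here: one for the CPU_TRACE_BEGIN escape
 * pair, leaving room for the up-to-three slots that the subsequent
 * log_sample() call needs for the event owning the backtrace.
 */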
static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	if (nr_available_slots(cpu_buf) < 4) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	if (!oprofile_begin_trace(cpu_buf))
		return;

	/*
	 * If log_sample() fails we can't backtrace, since we lost the
	 * source of this event.
	 */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, backtrace_depth);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
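
/*
 * Typical use from arch code, as a sketch only (the handler, NR_CTRS,
 * ctr_overflowed() and reset_ctr() below are illustrative, not defined
 * anywhere in this file): a performance-counter overflow handler
 * reports the interrupted context and the counter that fired.
 *
 *	static int my_check_ctrs(struct pt_regs * const regs)
 *	{
 *		int i;
 *
 *		for (i = 0; i < NR_CTRS; ++i) {
 *			if (ctr_overflowed(i)) {
 *				oprofile_add_sample(regs, i);
 *				reset_ctr(i);
 *			}
 *		}
 *		return 1;
 *	}
 */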

#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14
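/*
 * An IBS record is emitted as escape-coded entries: up to two state
 * codes (kernel/user switch, task switch), the ibs_code escape pair,
 * then three eip/event pairs carrying ibs[0..5] for a fetch sample, or
 * six pairs carrying ibs[0..11] for an op (IBS_OP_BEGIN) sample.
 */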
static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
	unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;

		if (cpu_buf->last_task != task) {
			cpu_buf->last_task = task;
			add_code(cpu_buf, (unsigned long)task);
		}
	}

	add_code(cpu_buf, ibs_code);
	add_sample(cpu_buf, ibs[0], ibs[1]);
	add_sample(cpu_buf, ibs[2], ibs[3]);
	add_sample(cpu_buf, ibs[4], ibs[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		add_sample(cpu_buf, ibs[6], ibs[7]);
		add_sample(cpu_buf, ibs[8], ibs[9]);
		add_sample(cpu_buf, ibs[10], ibs[11]);
	}

	return 1;
}

void oprofile_add_ibs_sample(struct pt_regs *const regs,
			     unsigned int * const ibs_sample, u8 code)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	struct oprofile_cpu_buffer *cpu_buf =
		&per_cpu(cpu_buffer, smp_processor_id());

	if (!backtrace_depth) {
		log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
		return;
	}

	/*
	 * If log_ibs_sample() fails we can't backtrace, since we lost
	 * the source of this event.
	 */
	if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
		oprofile_ops.backtrace(regs, backtrace_depth);
}

#endif

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	if (nr_available_slots(cpu_buf) < 1) {
		cpu_buf->tracing = 0;
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/*
	 * A broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we see one.
	 */
	if (pc == ESCAPE_CODE) {
		cpu_buf->tracing = 0;
		cpu_buf->backtrace_aborted++;
		return;
	}

	add_sample(cpu_buf, pc, 0);
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure the task
 * mortuary progresses.
 *
 * By using schedule_delayed_work_on() for the initial queueing and
 * plain schedule_delayed_work() for re-arming, this work is guaranteed
 * to stay on the correct cpu: re-queueing from within the handler
 * schedules on the CPU the handler is currently running on.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
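
/*
 * Rough lifecycle, sketching how the pieces above fit together:
 * profiling start allocates the per-CPU buffers and calls
 * start_cpu_work(); each CPU's wq_sync_buffer() then fires roughly
 * every DEFAULT_TIMER_EXPIRE jiffies (one tenth of a second) and drains
 * that CPU's buffer via sync_buffer(); shutdown runs end_cpu_work() and
 * finally free_cpu_buffers().
 */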