/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

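/*
 * Each CPU buffer is drained into the event buffer every
 * DEFAULT_TIMER_EXPIRE jiffies (HZ / 10, i.e. roughly every 100 ms),
 * plus a per-CPU offset; see start_cpu_work() and wq_sync_buffer().
 */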
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

void free_cpu_buffers(void)
{
	int i;

	for_each_possible_cpu(i) {
		vfree(per_cpu(cpu_buffer, i).buffer);
		per_cpu(cpu_buffer, i).buffer = NULL;
	}
}

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return fs_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

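/*
 * Allocate one node-local sample buffer per possible CPU.
 * last_is_kernel starts out as -1 so that the first sample logged is
 * always preceded by a kernel/user state code (see log_sample()).
 */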
int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
		if (!b->buffer)
			goto fail;

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

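	/*
	 * cancel_delayed_work() only cancels a pending work item; it
	 * does not wait for a running wq_sync_buffer(), hence the
	 * flush below.
	 */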
	flush_scheduled_work();
}

/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
{
	/*
	 * reset these to invalid values; the next sample collected
	 * will populate the buffer with proper values to initialize
	 * the buffer
	 */
	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
}

/* compute number of available slots in cpu_buffer queue */
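/*
 * Worked example (illustrative, not from the original source): with
 * buffer_size = 8, head_pos = 5 and tail_pos = 2, the writer may
 * still fill slots 5, 6, 7 and 0; one slot is always kept empty so
 * a full ring is distinguishable from an empty one:
 * tail + (buffer_size - head) - 1 = 2 + (8 - 5) - 1 = 4.
 */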
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
{
	unsigned long head = b->head_pos;
	unsigned long tail = b->tail_pos;

	if (tail > head)
		return (tail - head) - 1;

	return tail + (b->buffer_size - head) - 1;
}

static void increment_head(struct oprofile_cpu_buffer *b)
{
	unsigned long new_head = b->head_pos + 1;

	/*
	 * Ensure anything written to the slot before we increment is
	 * visible
	 */
	wmb();

	if (new_head < b->buffer_size)
		b->head_pos = new_head;
	else
		b->head_pos = 0;
}

static inline void
add_sample(struct oprofile_cpu_buffer *cpu_buf,
	   unsigned long pc, unsigned long event)
{
	struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
	entry->eip = pc;
	entry->event = event;
	increment_head(cpu_buf);
}

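/*
 * A "code" entry is an op_sample whose eip field holds ESCAPE_CODE;
 * sync_buffer() then interprets the event field as state: a
 * kernel/user switch, a task pointer, or CPU_TRACE_BEGIN.
 */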
static inline void
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
	add_sample(buffer, ESCAPE_CODE, value);
}

/*
 * This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

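	/*
	 * Worst case this sample needs three slots: one escape code
	 * for a kernel/user switch, one for a task switch, and the
	 * sample itself.
	 */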
	if (nr_available_slots(cpu_buf) < 3) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}

	add_sample(cpu_buf, pc, event);
	return 1;
}

static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
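	/*
	 * Reserve one slot for the CPU_TRACE_BEGIN escape code plus
	 * the up to three slots the subsequent log_sample() needs.
	 */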
	if (nr_available_slots(cpu_buf) < 4) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	if (!oprofile_begin_trace(cpu_buf))
		return;

	/*
	 * If log_sample() fails we can't backtrace, since we lost the
	 * source of this event.
	 */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, backtrace_depth);
	oprofile_end_trace(cpu_buf);
}

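/* Convenience wrapper: derive pc and kernel/user mode from regs. */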
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14
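/*
 * Slot accounting (illustrative): the worst case below emits two
 * state escape codes, the ibs_code escape and six PC/event pairs,
 * i.e. nine slots; 14 is a conservative upper bound on that.
 */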

void oprofile_add_ibs_sample(struct pt_regs *const regs,
			     unsigned int *const ibs_sample, int ibs_code)
{
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
		/* we can't backtrace since we lost the source of this event */
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;
		if (cpu_buf->last_task != task) {
			cpu_buf->last_task = task;
			add_code(cpu_buf, (unsigned long)task);
		}
	}

	add_code(cpu_buf, ibs_code);
	add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
	add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
	add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
		add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
		add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
	}

	if (backtrace_depth)
		oprofile_ops.backtrace(regs, backtrace_depth);
}

#endif

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, is_kernel, event);
}

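/*
 * Record one backtrace frame; presumably called from an
 * architecture's oprofile_ops.backtrace() handler while tracing is
 * active, i.e. after oprofile_begin_trace() succeeds.
 */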
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	if (nr_available_slots(cpu_buf) < 1) {
		cpu_buf->tracing = 0;
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/*
	 * A broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get one.
	 */
	if (pc == ESCAPE_CODE) {
		cpu_buf->tracing = 0;
		cpu_buf->backtrace_aborted++;
		return;
	}

	add_sample(cpu_buf, pc, 0);
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

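		/*
		 * The work item ran on the wrong CPU, e.g. after the
		 * CPU it was scheduled on went away; if that CPU is
		 * offline, stop rearming the work.
		 */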
		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}