/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

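/* Tasks handed to us by the task-free notifier sit on dying_tasks; one
 * full buffer sync moves them to dead_tasks, and a second sync actually
 * frees them (see process_task_mortuary() below).
 */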
static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);


/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;
	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);
	return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}


static struct notifier_block task_free_nb = {
	.notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};


static void end_sync(void)
{
	end_cpu_work();
	/* make sure we don't leak task structs */
	process_task_mortuary();
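	/* twice: the first pass above freed already-dead tasks and promoted
	 * dying ones to dead_tasks; this second pass frees that fresh batch */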
	process_task_mortuary();
}


int sync_start(void)
{
	int err;

	start_cpu_work();

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

out:
	return err;
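/* error unwind: fall through the labels below, unregistering in the
 * reverse order of registration */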
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
out1:
	end_sync();
	goto out;
}


void sync_stop(void)
{
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	end_sync();
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
	unsigned long cookie;

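	/* a dcookie value is simply the dentry address (see fs/dcookies.c),
	 * so an already-cookied dentry can be returned directly */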
	if (path->dentry->d_cookie)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}


/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	if (!mm)
		goto out;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma->vm_file)
			continue;
		if (!(vma->vm_flags & VM_EXECUTABLE))
			continue;
		cookie = fast_get_dcookie(&vma->vm_file->f_path);
		break;
	}

out:
	return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;

	return cookie;
}

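/* Advance the consumer (tail) index of a CPU ring buffer, wrapping at
 * buffer_size. Only the sync code here moves tail_pos; the interrupt
 * side only moves head_pos.
 */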
static void increment_tail(struct oprofile_cpu_buffer *b)
{
	unsigned long new_tail = b->tail_pos + 1;

	rmb();	/* be sure fifo pointers are synchronized */

	if (new_tail < b->buffer_size)
		b->tail_pos = new_tail;
	else
		b->tail_pos = 0;
}

static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}


static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

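/* An IBS record occupies several consecutive (eip, event) slots in the
 * CPU buffer; these macros read a given slot's fields in place.
 */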
#define IBS_FETCH_CODE_SIZE	2
#define IBS_OP_CODE_SIZE	5
#define IBS_EIP(offset)				\
	(((struct op_sample *)&cpu_buf->buffer[(offset)])->eip)
#define IBS_EVENT(offset)			\
	(((struct op_sample *)&cpu_buf->buffer[(offset)])->event)

/*
 * Add IBS fetch and op entries to event buffer
 */
static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
			  int in_kernel, struct mm_struct *mm)
{
	unsigned long rip;
	int i, count;
	unsigned long ibs_cookie = 0;
	off_t offset;

	increment_tail(cpu_buf);	/* move to RIP entry */

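	/* the RIP spans one sample slot: low half in ->eip and, on
	 * 64-bit kernels, high half in ->event */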
	rip = IBS_EIP(cpu_buf->tail_pos);

#ifdef __LP64__
	rip += IBS_EVENT(cpu_buf->tail_pos) << 32;
#endif

	if (mm) {
		ibs_cookie = lookup_dcookie(mm, rip, &offset);

		if (ibs_cookie == NO_COOKIE)
			offset = rip;
		if (ibs_cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = rip;
		}
		if (ibs_cookie != last_cookie) {
			add_cookie_switch(ibs_cookie);
			last_cookie = ibs_cookie;
		}
	} else
		offset = rip;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* offset from dcookie */

	/* we send the dcookie offset, but also send the raw linear address */
	add_event_entry(IBS_EIP(cpu_buf->tail_pos));
	add_event_entry(IBS_EVENT(cpu_buf->tail_pos));

	if (code == IBS_FETCH_CODE)
		count = IBS_FETCH_CODE_SIZE;	/* IBS fetch is 2 int64s */
	else
		count = IBS_OP_CODE_SIZE;	/* IBS op is 5 int64s */

	for (i = 0; i < count; i++) {
		increment_tail(cpu_buf);
		add_event_entry(IBS_EIP(cpu_buf->tail_pos));
		add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
	}
}

static void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}


static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
{
	unsigned long cookie;
	off_t offset;

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}


/* Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	} else if (mm) {
		return add_us_sample(mm, s);
	} else {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
	}
	return 0;
}


static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	up_read(&mm->mmap_sem);
	mmput(mm);
}


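/* Grab the task's mm, if any, with mmap_sem held for reading; the
 * matching release_mm() above drops both the lock and the reference.
 */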
static struct mm_struct *take_tasks_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	if (mm)
		down_read(&mm->mmap_sem);
	return mm;
}


static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}


/* "acquire" as many cpu buffer slots as we can */
static unsigned long get_slots(struct oprofile_cpu_buffer *b)
{
	unsigned long head = b->head_pos;
	unsigned long tail = b->tail_pos;

	/*
	 * Subtle. This resets the persistent last_task
	 * and in_kernel values used for switching notes.
	 * BUT, there is a small window between reading
	 * head_pos, and this call, that means samples
	 * can appear at the new head position, but not
	 * be prefixed with the notes for switching
	 * kernel mode or a task switch. This small hole
	 * can lead to mis-attribution or samples where
	 * we don't know if it's in the kernel or not,
	 * at the start of an event buffer.
	 */
	cpu_buffer_reset(b);

	if (head >= tail)
		return head - tail;

	return head + (b->buffer_size - tail);
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}


static void mark_done(int cpu)
{
	int i;

	cpu_set(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpu_isset(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpus_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we need a fifth state and some special handling
 * in sync_buffer()
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;
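/* Note: the ordering matters; sync_buffer() tests state >= sb_bt_start
 * to decide whether samples may be emitted at all.
 */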

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
	struct mm_struct *mm = NULL;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	unsigned int i;
	sync_buffer_state state = sb_buffer_start;
	unsigned long available;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	/* Remember, only we can modify tail_pos */

	available = get_slots(cpu_buf);

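	/* Each buffer slot is an (eip, event) pair. A slot whose eip is
	 * ESCAPE_CODE is a control record keyed by its event field:
	 * values up to CPU_IS_KERNEL flag a kernel/user mode switch,
	 * CPU_TRACE_BEGIN and the IBS codes open special records, and
	 * anything else is a task pointer marking a context switch.
	 * Ordinary slots are PC samples.
	 */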
	for (i = 0; i < available; ++i) {
		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];

		if (is_code(s->eip)) {
			if (s->event <= CPU_IS_KERNEL) {
				/* kernel/userspace switch */
				in_kernel = s->event;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(s->event);
			} else if (s->event == CPU_TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			} else if (s->event == IBS_FETCH_BEGIN) {
				state = sb_bt_start;
				add_ibs_begin(cpu_buf,
					      IBS_FETCH_CODE, in_kernel, mm);
			} else if (s->event == IBS_OP_BEGIN) {
				state = sb_bt_start;
				add_ibs_begin(cpu_buf,
					      IBS_OP_CODE, in_kernel, mm);
			} else {
				struct mm_struct *oldmm = mm;

				/* userspace context switch */
				new = (struct task_struct *)s->event;

				release_mm(oldmm);
				mm = take_tasks_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
		} else if (state >= sb_bt_start &&
			   !add_sample(mm, s, in_kernel)) {
			if (state == sb_bt_start) {
				state = sb_bt_ignore;
				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
			}
		}

		increment_tail(cpu_buf);
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}