/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */
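
/*
 * Illustrative sketch (not a literal dump): the global event buffer
 * ends up as a flat stream of unsigned longs in which ESCAPE_CODE
 * introduces a control record and everything else is (offset, event)
 * sample pairs, roughly:
 *
 *	ESCAPE_CODE, CPU_SWITCH_CODE, 2,		<- now syncing CPU 2
 *	ESCAPE_CODE, CTX_SWITCH_CODE, pid, cookie,	<- task switch
 *	ESCAPE_CODE, CTX_TGID_CODE, tgid,		<- daemon back-compat
 *	offset, event,					<- one sample
 *	...
 *
 * See the add_*() helpers below for the records actually emitted.
 */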

#include <linux/file.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/gfp.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;
	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);
	return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}
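
/* Note that the MODULE_LOADED_CODE record carries no payload: the
 * userspace daemon is expected to respond by re-scanning the kernel's
 * module layout itself (daemon behaviour, so not verifiable from this
 * file alone).
 */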

static struct notifier_block task_free_nb = {
	.notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};

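/* Draining the mortuary takes two passes: the first call frees
 * everything already on dead_tasks and promotes dying_tasks to
 * dead_tasks; the second frees what was just promoted. See
 * process_task_mortuary() below.
 */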
static void free_all_tasks(void)
{
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}

int sync_start(void)
{
	int err;

	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
		return -ENOMEM;

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

	start_cpu_work();

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
	free_all_tasks();
out1:
	free_cpumask_var(marked_cpus);
	goto out;
}
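
/* Note the unwind order above: out2 calls free_all_tasks() after
 * unregistering the task-handoff notifier, because a task may already
 * have been handed off between task_handoff_register() succeeding and a
 * later registration failing.
 */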

void sync_stop(void)
{
	end_cpu_work();
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	barrier();			/* do all of the above first */

	flush_cpu_work();

	free_all_tasks();
	free_cpumask_var(marked_cpus);
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
	unsigned long cookie;

	if (path->dentry->d_flags & DCACHE_COOKIE)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}
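
/* The fast path works because the cookie value *is* the dentry address:
 * once DCACHE_COOKIE is set, a dcookie for this dentry already exists
 * in fs/dcookies.c and get_dcookie() can be skipped entirely.
 */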

/* Look up the dcookie for the task's mm->exe_file,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct file *exe_file;

	if (!mm)
		goto done;

	exe_file = get_mm_exe_file(mm);
	if (!exe_file)
		goto done;

	cookie = fast_get_dcookie(&exe_file->f_path);
	fput(exe_file);
done:
	return cookie;
}
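
/* Illustrative example: for a bash process currently executing inside
 * libc, the exec cookie above names /bin/bash while lookup_dcookie()
 * below resolves the sample's EIP to libc's cookie (paths illustrative).
 */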

/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 *
 * The caller must ensure the mm is not nil (ie: not a kernel thread).
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;
	up_read(&mm->mmap_sem);

	return cookie;
}
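
/* Worked example (illustrative numbers): a file-backed vma with
 * vm_start = 0xb7700000 and vm_pgoff = 0 containing addr = 0xb7700123
 * yields *offset = 0x123, the file-relative offset; an anonymous
 * mapping records the raw address instead.
 */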

static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}


static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
	unsigned long code, pc, val;
	unsigned long cookie;
	off_t offset;

	if (!op_cpu_buffer_get_data(entry, &code))
		return;
	if (!op_cpu_buffer_get_data(entry, &pc))
		return;
	if (!op_cpu_buffer_get_size(entry))
		return;

	if (mm) {
		cookie = lookup_dcookie(mm, pc, &offset);

		if (cookie == NO_COOKIE)
			offset = pc;
		if (cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = pc;
		}
		if (cookie != last_cookie) {
			add_cookie_switch(cookie);
			last_cookie = cookie;
		}
	} else
		offset = pc;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* Offset from Dcookie */

	while (op_cpu_buffer_get_data(entry, &val))
		add_event_entry(val);
}
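
/* add_data() thus emits one escape-prefixed block per extended sample:
 * ESCAPE_CODE, code, offset, then the payload words verbatim (e.g. the
 * raw register data of an AMD IBS sample, the original user of this
 * path; any op_cpu_buffer payload travels the same way).
 */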

static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}


/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	unsigned long cookie;
	off_t offset;

	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	}

	/* add userspace sample */

	if (!mm) {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
		return 0;
	}

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}


static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	mmput(mm);
}

static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}


static void mark_done(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpumask_test_cpu(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpumask_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we need a fifth state and some special handling
 * in sync_buffer()
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;
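
/* State transitions, roughly: parsing starts in sb_buffer_start and
 * samples are dropped until the first kernel enter/exit switch promotes
 * the state to sb_sample_start; TRACE_BEGIN flips it to sb_bt_start,
 * and a sample that fails to resolve demotes sb_bt_start to
 * sb_bt_ignore so the rest of that backtrace is discarded.
 */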

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct mm_struct *mm = NULL;
	struct mm_struct *oldmm;
	unsigned long val;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	sync_buffer_state state = sb_buffer_start;
	unsigned int i;
	unsigned long available;
	unsigned long flags;
	struct op_entry entry;
	struct op_sample *sample;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	op_cpu_buffer_reset(cpu);
	available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; ++i) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;

		if (is_code(sample->eip)) {
			flags = sample->event;
			if (flags & TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			}
			if (flags & KERNEL_CTX_SWITCH) {
				/* kernel/userspace switch */
				in_kernel = flags & IS_KERNEL;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(flags & IS_KERNEL);
			}
			if (flags & USER_CTX_SWITCH
			    && op_cpu_buffer_get_data(&entry, &val)) {
				/* userspace context switch */
				new = (struct task_struct *)val;
				oldmm = mm;
				release_mm(oldmm);
				mm = get_task_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
			if (op_cpu_buffer_get_size(&entry))
				add_data(&entry, mm);
			continue;
		}

		if (state < sb_bt_start)
			/* ignore sample */
			continue;

		if (add_sample(mm, sample, in_kernel))
			continue;

		/* ignore backtraces if failed to add a sample */
		if (state == sb_bt_start) {
			state = sb_bt_ignore;
			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
		}
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}

/* The function can be used to add a buffer worth of data directly to
 * the kernel buffer. The buffer is assumed to be a circular buffer.
 * Take the entries from index start and end at index stop, wrapping
 * at index max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
		       unsigned int stop, unsigned int max)
{
	int i;

	i = start;

	mutex_lock(&buffer_mutex);
	while (i != stop) {
		add_event_entry(buf[i++]);

		if (i >= max)
			i = 0;
	}

	mutex_unlock(&buffer_mutex);
}

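/* Worked example (hypothetical numbers): with max = 4 slots and a
 * wrapped writer, start = 2 and stop = 1 flushes buf[2], buf[3], buf[0]
 * into the event buffer. The in-tree user of this hook is the Cell SPU
 * profiler's task-sync code.
 */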