/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU events:
 */
static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_events __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;
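/*
 * Illustrative sketch (simplified; the real checks live in the syscall and
 * PMU setup paths): callers typically gate privileged functionality on this
 * level with checks of the form
 *
 *	if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN))
 *		return -EACCES;	// no kernel profiling for unprivileged users
 *
 * Administrators can change the level at runtime through
 * /proc/sys/kernel/perf_event_paranoid.
 */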

int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;

static atomic64_t perf_event_id;

/*
 * Lock for (sysadmin-configurable) event reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

void __weak perf_event_print_debug(void)	{ }

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
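/*
 * Illustrative example of the nesting behaviour above: disable/enable pairs
 * may nest, and the PMU callbacks only run on the outermost transition of
 * the per-cpu count:
 *
 *	perf_pmu_disable(pmu);	// 0 -> 1: pmu->pmu_disable() is called
 *	perf_pmu_disable(pmu);	// 1 -> 2: no hardware access
 *	perf_pmu_enable(pmu);	// 2 -> 1: no hardware access
 *	perf_pmu_enable(pmu);	// 1 -> 0: pmu->pmu_enable() is called
 */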

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}
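/*
 * Note on the pairing above: dropping the last reference defers the actual
 * kfree() to an RCU grace period via call_rcu(), so readers that dereference
 * a context under rcu_read_lock() (see perf_lock_task_context() below) never
 * see it freed underneath them.
 */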

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}
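/*
 * The pattern above is the usual lock-and-revalidate dance: the context
 * pointer is re-read under ctx->lock, and the reference is only taken with
 * atomic_inc_not_zero(), so a context that is concurrently being swapped or
 * freed is either retried or reported back as NULL.
 */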

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = ctx->time;

	event->total_time_running = run_end - event->tstamp_running;
}
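/*
 * Worked example with made-up numbers: for an active event with
 * tstamp_enabled == 100us and tstamp_running == 120us in a context whose
 * time is now 200us, the code above yields total_time_enabled == 100us and
 * total_time_running == 80us.  Userspace can use the ratio of the two to
 * scale raw counts when events were multiplexed.
 */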

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a standalone event or group leader, we go to the context
	 * list; group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader;

	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
	    !is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If the event was in error state, then keep it that way;
	 * otherwise bogus counts will be returned on read().
	 * The only way to get out of error state is by explicit
	 * re-enabling of the event.
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		return;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}
}

static inline int
event_filter_match(struct perf_event *event)
{
	return event->cpu == -1 || event->cpu == smp_processor_id();
}
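/*
 * In other words: an event bound to a specific CPU (event->cpu >= 0) only
 * counts while that CPU is running the monitored context, while
 * event->cpu == -1 means "count on whichever CPU the task runs".
 */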

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 delta;
	/*
	 * An event which could not be activated because of a filter
	 * mismatch still needs to have its timings maintained,
	 * otherwise bogus information is returned via read() for
	 * time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = ctx->time - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = ctx->time;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = ctx->time;
	event->pmu->disable(event);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task events with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_events - ctx->nr_events,
			    perf_max_events - perf_reserved_percpu);
	}

	raw_spin_unlock(&ctx->lock);
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with an smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so
	 * we can remove the event safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a per-task event, we need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_disable, event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock_irq(&ctx->lock);
}

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->enable(event)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			break;
		event_sched_out(event, cpuctx, ctx);
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}
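/*
 * Note on the flow above: the pmu->start_txn()/commit_txn()/cancel_txn()
 * hooks let the PMU validate the whole group against the available hardware
 * counters at once, so a group either goes on the PMU in its entirety or not
 * at all; a failed member triggers the unwind at group_error.
 */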

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = ctx->time;
	event->tstamp_running = ctx->time;
	event->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no events.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	add_event_to_ctx(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		goto unlock;

	/*
	 * Don't put the event on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE ||
	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive event can't go on if there are already active
	 * hardware events, and no hardware event can go on if there
	 * is already an exclusive event on.
	 */
	if (!group_can_go_on(event, cpuctx, 1))
		err = -EEXIST;
	else
		err = event_sched_in(event, cpuctx, ctx);

	if (err) {
		/*
		 * This event couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the event group is pinned then put it in error state.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	raw_spin_unlock(&ctx->lock);
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so
	 * we can add the event safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&event->group_entry))
		add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
{
	struct perf_event *sub;

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = ctx->time - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled =
				ctx->time - sub->total_time_enabled;
		}
	}
}

/*
 * Cross CPU call to enable a performance event
 */
static void __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	int err;

	/*
	 * If this is a per-task event, we need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;
	__perf_event_mark_enabled(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		goto unlock;

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);
}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_enable,
					 event, 1);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	raw_spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_event_enable, event);

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_OFF)
		__perf_event_mark_enabled(event, ctx);

out:
	raw_spin_unlock_irq(&ctx->lock);
}

static int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}
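/*
 * This backs the PERF_EVENT_IOC_REFRESH ioctl: the caller arms the event for
 * 'refresh' more overflows (tracked in event_limit, which auto-disables the
 * event when it runs out) and re-enables it.
 */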

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);

	if (!ctx->nr_active)
		goto out;

	if (event_type & EVENT_PINNED) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if (event_type & EVENT_FLEXIBLE) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
out:
	raw_spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value; we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single().  However,
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
void perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	int do_switch = 1;

	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt rcu_dereference() of perf_event_ctxp?
			 */
			task->perf_event_ctxp = next_ctx;
			next->perf_event_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
	}
}

static void task_ctx_sched_out(struct perf_event_context *ctx,
			       enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, event_type);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void __perf_event_task_sched_out(struct perf_event_context *ctx)
{
	task_ctx_sched_out(ctx, EVENT_ALL);
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}

static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}

static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
		}
	}
}

static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type)
{
	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_events))
		goto out;

	ctx->timestamp = perf_clock();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (event_type & EVENT_PINNED)
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (event_type & EVENT_FLEXIBLE)
		ctx_flexible_sched_in(ctx, cpuctx);

out:
	raw_spin_unlock(&ctx->lock);
}

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type);
}

static void task_ctx_sched_in(struct task_struct *task,
			      enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	ctx_sched_in(ctx, cpuctx, event_type);
	cpuctx->task_ctx = ctx;
}
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * keep the event running.
 */
void perf_event_task_sched_in(struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;

	if (cpuctx->task_ctx == ctx)
		return;

	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);

	cpuctx->task_ctx = ctx;
}
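/*
 * Note on the ordering above: cpu-pinned events were never scheduled out on
 * the context switch, so only the cpu flexible groups are pushed out and
 * re-added, after the incoming task's pinned groups have had first pick of
 * the hardware.
 */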

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */
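	/*
	 * Worked example with made-up numbers: 4,000,000 events counted in
	 * 10,000,000ns (10ms) at sample_freq = 1000HZ gives
	 * period = 4e6 * 1e9 / (1e7 * 1e3) = 400,000 events per sample,
	 * i.e. one sample every millisecond at the observed event rate.
	 */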

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}

static void perf_event_stop(struct perf_event *event)
{
	if (!event->pmu->stop)
		return event->pmu->disable(event);

	return event->pmu->stop(event);
}

static int perf_event_start(struct perf_event *event)
{
	if (!event->pmu->start)
		return event->pmu->enable(event);

	return event->pmu->start(event);
}

static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		perf_event_stop(event);
		local64_set(&hwc->period_left, 0);
		perf_event_start(event);
	}
}
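/*
 * The (delta + 7) / 8 step above is a simple low-pass filter: only 1/8th of
 * the difference between the newly computed and the current sample period is
 * applied per adjustment, so the period converges gradually instead of
 * oscillating with short-term load spikes; a wildly stale period_left also
 * forces a stop/restart of the event.
 */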

static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, now;
	s64 delta;

	raw_spin_lock(&ctx->lock);
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			event->pmu->unthrottle(event);
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		event->pmu->read(event);
		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		if (delta > 0)
			perf_adjust_period(event, TICK_NSEC, delta);
	}
	raw_spin_unlock(&ctx->lock);
}
1567
1568/*
1569 * Round-robin a context's events:
1570 */
1571static void rotate_ctx(struct perf_event_context *ctx)
1572{
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001573 raw_spin_lock(&ctx->lock);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001574
Frederic Weisbeckere2864172010-01-09 21:05:28 +01001575 /* Rotate the non-pinned groups: move the first entry to the end */
Frederic Weisbeckere2864172010-01-09 21:05:28 +01001576 list_rotate_left(&ctx->flexible_groups);
1577
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001578 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001579}
1580
Peter Zijlstra49f47432009-12-27 11:51:52 +01001581void perf_event_task_tick(struct task_struct *curr)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001582{
1583 struct perf_cpu_context *cpuctx;
1584 struct perf_event_context *ctx;
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001585 int rotate = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001586
1587 if (!atomic_read(&nr_events))
1588 return;
1589
Peter Zijlstra49f47432009-12-27 11:51:52 +01001590 cpuctx = &__get_cpu_var(perf_cpu_context);
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001591 if (cpuctx->ctx.nr_events &&
1592 cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1593 rotate = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001594
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001595 ctx = curr->perf_event_ctxp;
1596 if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
1597 rotate = 1;
Peter Zijlstra9717e6c2010-01-28 13:57:44 +01001598
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001599 perf_ctx_adjust_freq(&cpuctx->ctx);
1600 if (ctx)
1601 perf_ctx_adjust_freq(ctx);
1602
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001603 if (!rotate)
1604 return;
1605
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001606 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001607 if (ctx)
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001608 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001609
1610 rotate_ctx(&cpuctx->ctx);
1611 if (ctx)
1612 rotate_ctx(ctx);
1613
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001614 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001615 if (ctx)
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001616 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001617}
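
/*
 * Rotation note: flexible (non-pinned) groups are only rotated when
 * they do not all fit on the PMU (nr_events != nr_active).  On an
 * over-committed PMU each tick therefore gives a different subset of
 * flexible groups a chance to count, which is what makes the
 * time-enabled/time-running scaling seen by readers meaningful.
 */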
1618
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001619static int event_enable_on_exec(struct perf_event *event,
1620 struct perf_event_context *ctx)
1621{
1622 if (!event->attr.enable_on_exec)
1623 return 0;
1624
1625 event->attr.enable_on_exec = 0;
1626 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1627 return 0;
1628
1629 __perf_event_mark_enabled(event, ctx);
1630
1631 return 1;
1632}
1633
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001634/*
1635 * Enable all of a task's events that have been marked enable-on-exec.
1636 * This expects task == current.
1637 */
1638static void perf_event_enable_on_exec(struct task_struct *task)
1639{
1640 struct perf_event_context *ctx;
1641 struct perf_event *event;
1642 unsigned long flags;
1643 int enabled = 0;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001644 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001645
1646 local_irq_save(flags);
1647 ctx = task->perf_event_ctxp;
1648 if (!ctx || !ctx->nr_events)
1649 goto out;
1650
1651 __perf_event_task_sched_out(ctx);
1652
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001653 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001654
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001655 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1656 ret = event_enable_on_exec(event, ctx);
1657 if (ret)
1658 enabled = 1;
1659 }
1660
1661 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1662 ret = event_enable_on_exec(event, ctx);
1663 if (ret)
1664 enabled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001665 }
1666
1667 /*
1668 * Unclone this context if we enabled any event.
1669 */
1670 if (enabled)
1671 unclone_ctx(ctx);
1672
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001673 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001674
Peter Zijlstra49f47432009-12-27 11:51:52 +01001675 perf_event_task_sched_in(task);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001676out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001677 local_irq_restore(flags);
1678}
1679
1680/*
1681 * Cross CPU call to read the hardware event
1682 */
1683static void __perf_event_read(void *info)
1684{
1685 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1686 struct perf_event *event = info;
1687 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001688
1689 /*
1690 * If this is a task context, we need to check whether it is
1691 * the current task context of this cpu. If not, it has been
1692 * scheduled out before the smp call arrived. In that case
1693 * event->count would have been updated to a recent sample
1694 * when the event was scheduled out.
1695 */
1696 if (ctx->task && cpuctx->task_ctx != ctx)
1697 return;
1698
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001699 raw_spin_lock(&ctx->lock);
Peter Zijlstra58e5ad12009-11-20 22:19:53 +01001700 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001701 update_event_times(event);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001702 raw_spin_unlock(&ctx->lock);
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001703
Peter Zijlstra58e5ad12009-11-20 22:19:53 +01001704 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001705}
1706
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001707static inline u64 perf_event_count(struct perf_event *event)
1708{
Peter Zijlstrae7850592010-05-21 14:43:08 +02001709 return local64_read(&event->count) + atomic64_read(&event->child_count);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001710}
1711
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001712static u64 perf_event_read(struct perf_event *event)
1713{
1714 /*
1715 * If the event is enabled and currently active on a CPU, update the
1716 * value in the event structure:
1717 */
1718 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1719 smp_call_function_single(event->oncpu,
1720 __perf_event_read, event, 1);
1721 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001722 struct perf_event_context *ctx = event->ctx;
1723 unsigned long flags;
1724
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001725 raw_spin_lock_irqsave(&ctx->lock, flags);
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001726 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001727 update_event_times(event);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001728 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001729 }
1730
Peter Zijlstrab5e58792010-05-21 14:43:12 +02001731 return perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001732}
1733
1734/*
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001735 * Callchain support
1736 */
1737
1738struct callchain_cpus_entries {
1739 struct rcu_head rcu_head;
1740 struct perf_callchain_entry *cpu_entries[0];
1741};
1742
Frederic Weisbecker7ae07ea2010-08-14 20:45:13 +02001743static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001744static atomic_t nr_callchain_events;
1745static DEFINE_MUTEX(callchain_mutex);
1746struct callchain_cpus_entries *callchain_cpus_entries;
1747
1748
1749__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
1750 struct pt_regs *regs)
1751{
1752}
1753
1754__weak void perf_callchain_user(struct perf_callchain_entry *entry,
1755 struct pt_regs *regs)
1756{
1757}
1758
1759static void release_callchain_buffers_rcu(struct rcu_head *head)
1760{
1761 struct callchain_cpus_entries *entries;
1762 int cpu;
1763
1764 entries = container_of(head, struct callchain_cpus_entries, rcu_head);
1765
1766 for_each_possible_cpu(cpu)
1767 kfree(entries->cpu_entries[cpu]);
1768
1769 kfree(entries);
1770}
1771
1772static void release_callchain_buffers(void)
1773{
1774 struct callchain_cpus_entries *entries;
1775
1776 entries = callchain_cpus_entries;
1777 rcu_assign_pointer(callchain_cpus_entries, NULL);
1778 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
1779}
1780
1781static int alloc_callchain_buffers(void)
1782{
1783 int cpu;
1784 int size;
1785 struct callchain_cpus_entries *entries;
1786
1787 /*
1788 * We can't use the percpu allocation API for data that can be
1789 * accessed from NMI. Use a temporary manual per-CPU allocation
1790 * until that gets sorted out.
1791 */
1792 size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
1793 num_possible_cpus();
1794
1795 entries = kzalloc(size, GFP_KERNEL);
1796 if (!entries)
1797 return -ENOMEM;
1798
Frederic Weisbecker7ae07ea2010-08-14 20:45:13 +02001799 size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001800
1801 for_each_possible_cpu(cpu) {
1802 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
1803 cpu_to_node(cpu));
1804 if (!entries->cpu_entries[cpu])
1805 goto fail;
1806 }
1807
1808 rcu_assign_pointer(callchain_cpus_entries, entries);
1809
1810 return 0;
1811
1812fail:
1813 for_each_possible_cpu(cpu)
1814 kfree(entries->cpu_entries[cpu]);
1815 kfree(entries);
1816
1817 return -ENOMEM;
1818}
1819
1820static int get_callchain_buffers(void)
1821{
1822 int err = 0;
1823 int count;
1824
1825 mutex_lock(&callchain_mutex);
1826
1827 count = atomic_inc_return(&nr_callchain_events);
1828 if (WARN_ON_ONCE(count < 1)) {
1829 err = -EINVAL;
1830 goto exit;
1831 }
1832
1833 if (count > 1) {
1834 /* If the allocation failed, give up */
1835 if (!callchain_cpus_entries)
1836 err = -ENOMEM;
1837 goto exit;
1838 }
1839
1840 err = alloc_callchain_buffers();
1841 if (err)
1842 release_callchain_buffers();
1843exit:
1844 mutex_unlock(&callchain_mutex);
1845
1846 return err;
1847}
1848
1849static void put_callchain_buffers(void)
1850{
1851 if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
1852 release_callchain_buffers();
1853 mutex_unlock(&callchain_mutex);
1854 }
1855}
1856
1857static int get_recursion_context(int *recursion)
1858{
1859 int rctx;
1860
1861 if (in_nmi())
1862 rctx = 3;
1863 else if (in_irq())
1864 rctx = 2;
1865 else if (in_softirq())
1866 rctx = 1;
1867 else
1868 rctx = 0;
1869
1870 if (recursion[rctx])
1871 return -1;
1872
1873 recursion[rctx]++;
1874 barrier();
1875
1876 return rctx;
1877}
1878
1879static inline void put_recursion_context(int *recursion, int rctx)
1880{
1881 barrier();
1882 recursion[rctx]--;
1883}
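
/*
 * For reference, the recursion index used above maps to the execution
 * context roughly as follows (one counter per context, per CPU):
 *
 *	rctx 0 - process/task context
 *	rctx 1 - softirq context
 *	rctx 2 - hardirq context
 *	rctx 3 - NMI context
 *
 * A non-zero counter means a callchain is already being recorded in
 * that context on this CPU, so a nested attempt returns -1 rather than
 * reusing the same per-CPU entry.
 */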
1884
1885static struct perf_callchain_entry *get_callchain_entry(int *rctx)
1886{
1887 int cpu;
1888 struct callchain_cpus_entries *entries;
1889
1890 *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
1891 if (*rctx == -1)
1892 return NULL;
1893
1894 entries = rcu_dereference(callchain_cpus_entries);
1895 if (!entries)
1896 return NULL;
1897
1898 cpu = smp_processor_id();
1899
1900 return &entries->cpu_entries[cpu][*rctx];
1901}
1902
1903static void
1904put_callchain_entry(int rctx)
1905{
1906 put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
1907}
1908
1909static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1910{
1911 int rctx;
1912 struct perf_callchain_entry *entry;
1913
1914
1915 entry = get_callchain_entry(&rctx);
1916 if (rctx == -1)
1917 return NULL;
1918
1919 if (!entry)
1920 goto exit_put;
1921
1922 entry->nr = 0;
1923
1924 if (!user_mode(regs)) {
1925 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
1926 perf_callchain_kernel(entry, regs);
1927 if (current->mm)
1928 regs = task_pt_regs(current);
1929 else
1930 regs = NULL;
1931 }
1932
1933 if (regs) {
1934 perf_callchain_store(entry, PERF_CONTEXT_USER);
1935 perf_callchain_user(entry, regs);
1936 }
1937
1938exit_put:
1939 put_callchain_entry(rctx);
1940
1941 return entry;
1942}
1943
1944/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001945 * Initialize the perf_event context in a task_struct:
1946 */
1947static void
1948__perf_event_init_context(struct perf_event_context *ctx,
1949 struct task_struct *task)
1950{
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001951 raw_spin_lock_init(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001952 mutex_init(&ctx->mutex);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001953 INIT_LIST_HEAD(&ctx->pinned_groups);
1954 INIT_LIST_HEAD(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001955 INIT_LIST_HEAD(&ctx->event_list);
1956 atomic_set(&ctx->refcount, 1);
1957 ctx->task = task;
1958}
1959
1960static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1961{
1962 struct perf_event_context *ctx;
1963 struct perf_cpu_context *cpuctx;
1964 struct task_struct *task;
1965 unsigned long flags;
1966 int err;
1967
Peter Zijlstraf4c41762009-12-16 17:55:54 +01001968 if (pid == -1 && cpu != -1) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001969 /* Must be root to operate on a CPU event: */
1970 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1971 return ERR_PTR(-EACCES);
1972
Paul Mackerras0f624e72009-12-15 19:40:32 +11001973 if (cpu < 0 || cpu >= nr_cpumask_bits)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001974 return ERR_PTR(-EINVAL);
1975
1976 /*
1977 * We could be clever and allow attaching an event to an
1978 * offline CPU and activate it when the CPU comes up, but
1979 * that's for later.
1980 */
Rusty Russellf6325e32009-12-17 11:43:08 -06001981 if (!cpu_online(cpu))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001982 return ERR_PTR(-ENODEV);
1983
1984 cpuctx = &per_cpu(perf_cpu_context, cpu);
1985 ctx = &cpuctx->ctx;
1986 get_ctx(ctx);
1987
1988 return ctx;
1989 }
1990
1991 rcu_read_lock();
1992 if (!pid)
1993 task = current;
1994 else
1995 task = find_task_by_vpid(pid);
1996 if (task)
1997 get_task_struct(task);
1998 rcu_read_unlock();
1999
2000 if (!task)
2001 return ERR_PTR(-ESRCH);
2002
2003 /*
2004 * Can't attach events to a dying task.
2005 */
2006 err = -ESRCH;
2007 if (task->flags & PF_EXITING)
2008 goto errout;
2009
2010 /* Reuse ptrace permission checks for now. */
2011 err = -EACCES;
2012 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2013 goto errout;
2014
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002015retry:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002016 ctx = perf_lock_task_context(task, &flags);
2017 if (ctx) {
2018 unclone_ctx(ctx);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002019 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002020 }
2021
2022 if (!ctx) {
Xiao Guangrongaa5452d2009-12-09 11:28:13 +08002023 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002024 err = -ENOMEM;
2025 if (!ctx)
2026 goto errout;
2027 __perf_event_init_context(ctx, task);
2028 get_ctx(ctx);
2029 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
2030 /*
2031 * We raced with some other task; use
2032 * the context they set.
2033 */
2034 kfree(ctx);
2035 goto retry;
2036 }
2037 get_task_struct(task);
2038 }
2039
2040 put_task_struct(task);
2041 return ctx;
2042
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002043errout:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002044 put_task_struct(task);
2045 return ERR_PTR(err);
2046}
2047
Li Zefan6fb29152009-10-15 11:21:42 +08002048static void perf_event_free_filter(struct perf_event *event);
2049
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002050static void free_event_rcu(struct rcu_head *head)
2051{
2052 struct perf_event *event;
2053
2054 event = container_of(head, struct perf_event, rcu_head);
2055 if (event->ns)
2056 put_pid_ns(event->ns);
Li Zefan6fb29152009-10-15 11:21:42 +08002057 perf_event_free_filter(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002058 kfree(event);
2059}
2060
2061static void perf_pending_sync(struct perf_event *event);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002062static void perf_buffer_put(struct perf_buffer *buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002063
2064static void free_event(struct perf_event *event)
2065{
2066 perf_pending_sync(event);
2067
2068 if (!event->parent) {
2069 atomic_dec(&nr_events);
Eric B Munson3af9e852010-05-18 15:30:49 +01002070 if (event->attr.mmap || event->attr.mmap_data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002071 atomic_dec(&nr_mmap_events);
2072 if (event->attr.comm)
2073 atomic_dec(&nr_comm_events);
2074 if (event->attr.task)
2075 atomic_dec(&nr_task_events);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002076 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2077 put_callchain_buffers();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002078 }
2079
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002080 if (event->buffer) {
2081 perf_buffer_put(event->buffer);
2082 event->buffer = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002083 }
2084
2085 if (event->destroy)
2086 event->destroy(event);
2087
2088 put_ctx(event->ctx);
2089 call_rcu(&event->rcu_head, free_event_rcu);
2090}
2091
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002092int perf_event_release_kernel(struct perf_event *event)
2093{
2094 struct perf_event_context *ctx = event->ctx;
2095
Peter Zijlstra050735b2010-05-11 11:51:53 +02002096 /*
2097 * Remove it from the PMU; it can't get re-enabled since we got
2098 * here because the last reference went away.
2099 */
2100 perf_event_disable(event);
2101
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002102 WARN_ON_ONCE(ctx->parent_ctx);
Peter Zijlstraa0507c82010-05-06 15:42:53 +02002103 /*
2104 * There are two ways this annotation is useful:
2105 *
2106 * 1) there is a lock recursion from perf_event_exit_task
2107 * see the comment there.
2108 *
2109 * 2) there is a lock-inversion with mmap_sem through
2110 * perf_event_read_group(), which takes faults while
2111 * holding ctx->mutex; however, this is called after
2112 * the last file descriptor died, so there is no possibility
2113 * of triggering the AB-BA case.
2114 */
2115 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002116 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra8a495422010-05-27 15:47:49 +02002117 perf_group_detach(event);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002118 list_del_event(event, ctx);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002119 raw_spin_unlock_irq(&ctx->lock);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002120 mutex_unlock(&ctx->mutex);
2121
2122 mutex_lock(&event->owner->perf_event_mutex);
2123 list_del_init(&event->owner_entry);
2124 mutex_unlock(&event->owner->perf_event_mutex);
2125 put_task_struct(event->owner);
2126
2127 free_event(event);
2128
2129 return 0;
2130}
2131EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2132
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002133/*
2134 * Called when the last reference to the file is gone.
2135 */
2136static int perf_release(struct inode *inode, struct file *file)
2137{
2138 struct perf_event *event = file->private_data;
2139
2140 file->private_data = NULL;
2141
2142 return perf_event_release_kernel(event);
2143}
2144
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002145static int perf_event_read_size(struct perf_event *event)
2146{
2147 int entry = sizeof(u64); /* value */
2148 int size = 0;
2149 int nr = 1;
2150
2151 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2152 size += sizeof(u64);
2153
2154 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2155 size += sizeof(u64);
2156
2157 if (event->attr.read_format & PERF_FORMAT_ID)
2158 entry += sizeof(u64);
2159
2160 if (event->attr.read_format & PERF_FORMAT_GROUP) {
2161 nr += event->group_leader->nr_siblings;
2162 size += sizeof(u64);
2163 }
2164
2165 size += entry * nr;
2166
2167 return size;
2168}
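
/*
 * Example layout (illustrative): for a group leader with two siblings
 * and read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED the calculation above gives
 *
 *	entry = 2 * sizeof(u64)		(value + id)	= 16
 *	size  = 2 * sizeof(u64)		(nr + enabled)	= 16
 *	nr    = 1 + 2 siblings				= 3
 *	total = 16 + 16 * 3				= 64 bytes
 *
 * which matches the buffer perf_event_read_group() fills in below.
 */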
2169
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002170u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002171{
2172 struct perf_event *child;
2173 u64 total = 0;
2174
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002175 *enabled = 0;
2176 *running = 0;
2177
Peter Zijlstra6f105812009-11-20 22:19:56 +01002178 mutex_lock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002179 total += perf_event_read(event);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002180 *enabled += event->total_time_enabled +
2181 atomic64_read(&event->child_total_time_enabled);
2182 *running += event->total_time_running +
2183 atomic64_read(&event->child_total_time_running);
2184
2185 list_for_each_entry(child, &event->child_list, child_list) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002186 total += perf_event_read(child);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002187 *enabled += child->total_time_enabled;
2188 *running += child->total_time_running;
2189 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01002190 mutex_unlock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002191
2192 return total;
2193}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002194EXPORT_SYMBOL_GPL(perf_event_read_value);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002195
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002196static int perf_event_read_group(struct perf_event *event,
2197 u64 read_format, char __user *buf)
2198{
2199 struct perf_event *leader = event->group_leader, *sub;
Peter Zijlstra6f105812009-11-20 22:19:56 +01002200 int n = 0, size = 0, ret = -EFAULT;
2201 struct perf_event_context *ctx = leader->ctx;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002202 u64 values[5];
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002203 u64 count, enabled, running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002204
Peter Zijlstra6f105812009-11-20 22:19:56 +01002205 mutex_lock(&ctx->mutex);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002206 count = perf_event_read_value(leader, &enabled, &running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002207
2208 values[n++] = 1 + leader->nr_siblings;
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002209 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2210 values[n++] = enabled;
2211 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2212 values[n++] = running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002213 values[n++] = count;
2214 if (read_format & PERF_FORMAT_ID)
2215 values[n++] = primary_event_id(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002216
2217 size = n * sizeof(u64);
2218
2219 if (copy_to_user(buf, values, size))
Peter Zijlstra6f105812009-11-20 22:19:56 +01002220 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002221
Peter Zijlstra6f105812009-11-20 22:19:56 +01002222 ret = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002223
2224 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Peter Zijlstraabf48682009-11-20 22:19:49 +01002225 n = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002226
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002227 values[n++] = perf_event_read_value(sub, &enabled, &running);
Peter Zijlstraabf48682009-11-20 22:19:49 +01002228 if (read_format & PERF_FORMAT_ID)
2229 values[n++] = primary_event_id(sub);
2230
2231 size = n * sizeof(u64);
2232
Stephane Eranian184d3da2009-11-23 21:40:49 -08002233 if (copy_to_user(buf + ret, values, size)) {
Peter Zijlstra6f105812009-11-20 22:19:56 +01002234 ret = -EFAULT;
2235 goto unlock;
2236 }
Peter Zijlstraabf48682009-11-20 22:19:49 +01002237
2238 ret += size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002239 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01002240unlock:
2241 mutex_unlock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002242
Peter Zijlstraabf48682009-11-20 22:19:49 +01002243 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002244}
2245
2246static int perf_event_read_one(struct perf_event *event,
2247 u64 read_format, char __user *buf)
2248{
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002249 u64 enabled, running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002250 u64 values[4];
2251 int n = 0;
2252
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002253 values[n++] = perf_event_read_value(event, &enabled, &running);
2254 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2255 values[n++] = enabled;
2256 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2257 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002258 if (read_format & PERF_FORMAT_ID)
2259 values[n++] = primary_event_id(event);
2260
2261 if (copy_to_user(buf, values, n * sizeof(u64)))
2262 return -EFAULT;
2263
2264 return n * sizeof(u64);
2265}
2266
2267/*
2268 * Read the performance event - a simple, non-blocking version for now
2269 */
2270static ssize_t
2271perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2272{
2273 u64 read_format = event->attr.read_format;
2274 int ret;
2275
2276 /*
2277 * Return end-of-file for a read on an event that is in
2278 * error state (i.e. because it was pinned but it couldn't be
2279 * scheduled onto the CPU at some point).
2280 */
2281 if (event->state == PERF_EVENT_STATE_ERROR)
2282 return 0;
2283
2284 if (count < perf_event_read_size(event))
2285 return -ENOSPC;
2286
2287 WARN_ON_ONCE(event->ctx->parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002288 if (read_format & PERF_FORMAT_GROUP)
2289 ret = perf_event_read_group(event, read_format, buf);
2290 else
2291 ret = perf_event_read_one(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002292
2293 return ret;
2294}
2295
2296static ssize_t
2297perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2298{
2299 struct perf_event *event = file->private_data;
2300
2301 return perf_read_hw(event, buf, count);
2302}
2303
2304static unsigned int perf_poll(struct file *file, poll_table *wait)
2305{
2306 struct perf_event *event = file->private_data;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002307 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002308 unsigned int events = POLL_HUP;
2309
2310 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002311 buffer = rcu_dereference(event->buffer);
2312 if (buffer)
2313 events = atomic_xchg(&buffer->poll, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002314 rcu_read_unlock();
2315
2316 poll_wait(file, &event->waitq, wait);
2317
2318 return events;
2319}
2320
2321static void perf_event_reset(struct perf_event *event)
2322{
2323 (void)perf_event_read(event);
Peter Zijlstrae7850592010-05-21 14:43:08 +02002324 local64_set(&event->count, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002325 perf_event_update_userpage(event);
2326}
2327
2328/*
2329 * Holding the top-level event's child_mutex means that any
2330 * descendant process that has inherited this event will block
2331 * in sync_child_event if it goes to exit, thus satisfying the
2332 * task existence requirements of perf_event_enable/disable.
2333 */
2334static void perf_event_for_each_child(struct perf_event *event,
2335 void (*func)(struct perf_event *))
2336{
2337 struct perf_event *child;
2338
2339 WARN_ON_ONCE(event->ctx->parent_ctx);
2340 mutex_lock(&event->child_mutex);
2341 func(event);
2342 list_for_each_entry(child, &event->child_list, child_list)
2343 func(child);
2344 mutex_unlock(&event->child_mutex);
2345}
2346
2347static void perf_event_for_each(struct perf_event *event,
2348 void (*func)(struct perf_event *))
2349{
2350 struct perf_event_context *ctx = event->ctx;
2351 struct perf_event *sibling;
2352
2353 WARN_ON_ONCE(ctx->parent_ctx);
2354 mutex_lock(&ctx->mutex);
2355 event = event->group_leader;
2356
2357 perf_event_for_each_child(event, func);
2358 func(event);
2359 list_for_each_entry(sibling, &event->sibling_list, group_entry)
2360 perf_event_for_each_child(event, func);
2361 mutex_unlock(&ctx->mutex);
2362}
2363
2364static int perf_event_period(struct perf_event *event, u64 __user *arg)
2365{
2366 struct perf_event_context *ctx = event->ctx;
2367 unsigned long size;
2368 int ret = 0;
2369 u64 value;
2370
2371 if (!event->attr.sample_period)
2372 return -EINVAL;
2373
2374 size = copy_from_user(&value, arg, sizeof(value));
2375 if (size != sizeof(value))
2376 return -EFAULT;
2377
2378 if (!value)
2379 return -EINVAL;
2380
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002381 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002382 if (event->attr.freq) {
2383 if (value > sysctl_perf_event_sample_rate) {
2384 ret = -EINVAL;
2385 goto unlock;
2386 }
2387
2388 event->attr.sample_freq = value;
2389 } else {
2390 event->attr.sample_period = value;
2391 event->hw.sample_period = value;
2392 }
2393unlock:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002394 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002395
2396 return ret;
2397}
2398
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002399static const struct file_operations perf_fops;
2400
2401static struct perf_event *perf_fget_light(int fd, int *fput_needed)
2402{
2403 struct file *file;
2404
2405 file = fget_light(fd, fput_needed);
2406 if (!file)
2407 return ERR_PTR(-EBADF);
2408
2409 if (file->f_op != &perf_fops) {
2410 fput_light(file, *fput_needed);
2411 *fput_needed = 0;
2412 return ERR_PTR(-EBADF);
2413 }
2414
2415 return file->private_data;
2416}
2417
2418static int perf_event_set_output(struct perf_event *event,
2419 struct perf_event *output_event);
Li Zefan6fb29152009-10-15 11:21:42 +08002420static int perf_event_set_filter(struct perf_event *event, void __user *arg);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002421
2422static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2423{
2424 struct perf_event *event = file->private_data;
2425 void (*func)(struct perf_event *);
2426 u32 flags = arg;
2427
2428 switch (cmd) {
2429 case PERF_EVENT_IOC_ENABLE:
2430 func = perf_event_enable;
2431 break;
2432 case PERF_EVENT_IOC_DISABLE:
2433 func = perf_event_disable;
2434 break;
2435 case PERF_EVENT_IOC_RESET:
2436 func = perf_event_reset;
2437 break;
2438
2439 case PERF_EVENT_IOC_REFRESH:
2440 return perf_event_refresh(event, arg);
2441
2442 case PERF_EVENT_IOC_PERIOD:
2443 return perf_event_period(event, (u64 __user *)arg);
2444
2445 case PERF_EVENT_IOC_SET_OUTPUT:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002446 {
2447 struct perf_event *output_event = NULL;
2448 int fput_needed = 0;
2449 int ret;
2450
2451 if (arg != -1) {
2452 output_event = perf_fget_light(arg, &fput_needed);
2453 if (IS_ERR(output_event))
2454 return PTR_ERR(output_event);
2455 }
2456
2457 ret = perf_event_set_output(event, output_event);
2458 if (output_event)
2459 fput_light(output_event->filp, fput_needed);
2460
2461 return ret;
2462 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002463
Li Zefan6fb29152009-10-15 11:21:42 +08002464 case PERF_EVENT_IOC_SET_FILTER:
2465 return perf_event_set_filter(event, (void __user *)arg);
2466
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002467 default:
2468 return -ENOTTY;
2469 }
2470
2471 if (flags & PERF_IOC_FLAG_GROUP)
2472 perf_event_for_each(event, func);
2473 else
2474 perf_event_for_each_child(event, func);
2475
2476 return 0;
2477}
2478
2479int perf_event_task_enable(void)
2480{
2481 struct perf_event *event;
2482
2483 mutex_lock(&current->perf_event_mutex);
2484 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2485 perf_event_for_each_child(event, perf_event_enable);
2486 mutex_unlock(&current->perf_event_mutex);
2487
2488 return 0;
2489}
2490
2491int perf_event_task_disable(void)
2492{
2493 struct perf_event *event;
2494
2495 mutex_lock(&current->perf_event_mutex);
2496 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2497 perf_event_for_each_child(event, perf_event_disable);
2498 mutex_unlock(&current->perf_event_mutex);
2499
2500 return 0;
2501}
2502
2503#ifndef PERF_EVENT_INDEX_OFFSET
2504# define PERF_EVENT_INDEX_OFFSET 0
2505#endif
2506
2507static int perf_event_index(struct perf_event *event)
2508{
2509 if (event->state != PERF_EVENT_STATE_ACTIVE)
2510 return 0;
2511
2512 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2513}
2514
2515/*
2516 * Callers need to ensure there can be no nesting of this function, otherwise
2517 * the seqlock logic goes bad. We cannot serialize this because the arch
2518 * code calls this from NMI context.
2519 */
2520void perf_event_update_userpage(struct perf_event *event)
2521{
2522 struct perf_event_mmap_page *userpg;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002523 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002524
2525 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002526 buffer = rcu_dereference(event->buffer);
2527 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002528 goto unlock;
2529
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002530 userpg = buffer->user_page;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002531
2532 /*
2533 * Disable preemption so as not to let the corresponding user-space
2534 * spin too long if we get preempted.
2535 */
2536 preempt_disable();
2537 ++userpg->lock;
2538 barrier();
2539 userpg->index = perf_event_index(event);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02002540 userpg->offset = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002541 if (event->state == PERF_EVENT_STATE_ACTIVE)
Peter Zijlstrae7850592010-05-21 14:43:08 +02002542 userpg->offset -= local64_read(&event->hw.prev_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002543
2544 userpg->time_enabled = event->total_time_enabled +
2545 atomic64_read(&event->child_total_time_enabled);
2546
2547 userpg->time_running = event->total_time_running +
2548 atomic64_read(&event->child_total_time_running);
2549
2550 barrier();
2551 ++userpg->lock;
2552 preempt_enable();
2553unlock:
2554 rcu_read_unlock();
2555}
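
/*
 * A user-space reader of the mmap()ed control page is expected to pair
 * with the ->lock increments above in seqlock style.  Rough sketch
 * (illustrative only, not part of this file; 'pc' is a pointer to the
 * mapped struct perf_event_mmap_page):
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		index  = pc->index;
 *		offset = pc->offset;
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * If the loop exits with an unchanged sequence count the snapshot is
 * consistent; a non-zero index additionally allows architectures with
 * user-space counter access to read the hardware counter directly.
 */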
2556
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002557static unsigned long perf_data_size(struct perf_buffer *buffer);
2558
2559static void
2560perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
2561{
2562 long max_size = perf_data_size(buffer);
2563
2564 if (watermark)
2565 buffer->watermark = min(max_size, watermark);
2566
2567 if (!buffer->watermark)
2568 buffer->watermark = max_size / 2;
2569
2570 if (flags & PERF_BUFFER_WRITABLE)
2571 buffer->writable = 1;
2572
2573 atomic_set(&buffer->refcount, 1);
2574}
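
/*
 * Watermark example (illustrative): for a 16-page data buffer with no
 * explicit attr.wakeup_watermark, the watermark defaults to half of
 * perf_data_size(), so poll()/wakeup notifications fire roughly every
 * half-buffer of data written.
 */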
2575
Peter Zijlstra906010b2009-09-21 16:08:49 +02002576#ifndef CONFIG_PERF_USE_VMALLOC
2577
2578/*
2579 * Back perf_mmap() with regular GFP_KERNEL order-0 pages.
2580 */
2581
2582static struct page *
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002583perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002584{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002585 if (pgoff > buffer->nr_pages)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002586 return NULL;
2587
2588 if (pgoff == 0)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002589 return virt_to_page(buffer->user_page);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002590
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002591 return virt_to_page(buffer->data_pages[pgoff - 1]);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002592}
2593
Peter Zijlstraa19d35c2010-05-17 18:48:00 +02002594static void *perf_mmap_alloc_page(int cpu)
2595{
2596 struct page *page;
2597 int node;
2598
2599 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2600 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2601 if (!page)
2602 return NULL;
2603
2604 return page_address(page);
2605}
2606
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002607static struct perf_buffer *
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002608perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002609{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002610 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002611 unsigned long size;
2612 int i;
2613
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002614 size = sizeof(struct perf_buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002615 size += nr_pages * sizeof(void *);
2616
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002617 buffer = kzalloc(size, GFP_KERNEL);
2618 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002619 goto fail;
2620
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002621 buffer->user_page = perf_mmap_alloc_page(cpu);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002622 if (!buffer->user_page)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002623 goto fail_user_page;
2624
2625 for (i = 0; i < nr_pages; i++) {
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002626 buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002627 if (!buffer->data_pages[i])
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002628 goto fail_data_pages;
2629 }
2630
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002631 buffer->nr_pages = nr_pages;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002632
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002633 perf_buffer_init(buffer, watermark, flags);
2634
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002635 return buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002636
2637fail_data_pages:
2638 for (i--; i >= 0; i--)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002639 free_page((unsigned long)buffer->data_pages[i]);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002640
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002641 free_page((unsigned long)buffer->user_page);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002642
2643fail_user_page:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002644 kfree(buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002645
2646fail:
Peter Zijlstra906010b2009-09-21 16:08:49 +02002647 return NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002648}
2649
2650static void perf_mmap_free_page(unsigned long addr)
2651{
2652 struct page *page = virt_to_page((void *)addr);
2653
2654 page->mapping = NULL;
2655 __free_page(page);
2656}
2657
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002658static void perf_buffer_free(struct perf_buffer *buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002659{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002660 int i;
2661
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002662 perf_mmap_free_page((unsigned long)buffer->user_page);
2663 for (i = 0; i < buffer->nr_pages; i++)
2664 perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
2665 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002666}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002667
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002668static inline int page_order(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002669{
2670 return 0;
2671}
2672
Peter Zijlstra906010b2009-09-21 16:08:49 +02002673#else
2674
2675/*
2676 * Back perf_mmap() with vmalloc memory.
2677 *
2678 * Required for architectures that have d-cache aliasing issues.
2679 */
2680
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002681static inline int page_order(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002682{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002683 return buffer->page_order;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002684}
2685
Peter Zijlstra906010b2009-09-21 16:08:49 +02002686static struct page *
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002687perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002688{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002689 if (pgoff > (1UL << page_order(buffer)))
Peter Zijlstra906010b2009-09-21 16:08:49 +02002690 return NULL;
2691
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002692 return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002693}
2694
2695static void perf_mmap_unmark_page(void *addr)
2696{
2697 struct page *page = vmalloc_to_page(addr);
2698
2699 page->mapping = NULL;
2700}
2701
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002702static void perf_buffer_free_work(struct work_struct *work)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002703{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002704 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002705 void *base;
2706 int i, nr;
2707
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002708 buffer = container_of(work, struct perf_buffer, work);
2709 nr = 1 << page_order(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002710
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002711 base = buffer->user_page;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002712 for (i = 0; i < nr + 1; i++)
2713 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2714
2715 vfree(base);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002716 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002717}
2718
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002719static void perf_buffer_free(struct perf_buffer *buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002720{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002721 schedule_work(&buffer->work);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002722}
2723
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002724static struct perf_buffer *
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002725perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002726{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002727 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002728 unsigned long size;
2729 void *all_buf;
2730
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002731 size = sizeof(struct perf_buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002732 size += sizeof(void *);
2733
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002734 buffer = kzalloc(size, GFP_KERNEL);
2735 if (!buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002736 goto fail;
2737
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002738 INIT_WORK(&buffer->work, perf_buffer_free_work);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002739
2740 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2741 if (!all_buf)
2742 goto fail_all_buf;
2743
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002744 buffer->user_page = all_buf;
2745 buffer->data_pages[0] = all_buf + PAGE_SIZE;
2746 buffer->page_order = ilog2(nr_pages);
2747 buffer->nr_pages = 1;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002748
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002749 perf_buffer_init(buffer, watermark, flags);
2750
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002751 return buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002752
2753fail_all_buf:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002754 kfree(buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002755
2756fail:
2757 return NULL;
2758}
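
/*
 * Note on the vmalloc-backed layout above: the whole mapping is one
 * virtually contiguous allocation, so ->nr_pages stays 1 while
 * ->page_order records the real number of data pages (ilog2(nr_pages)).
 * perf_data_size() then reports nr_pages << (PAGE_SHIFT + page_order),
 * the same total as the page-by-page variant.
 */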
2759
2760#endif
2761
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002762static unsigned long perf_data_size(struct perf_buffer *buffer)
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002763{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002764 return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002765}
2766
Peter Zijlstra906010b2009-09-21 16:08:49 +02002767static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2768{
2769 struct perf_event *event = vma->vm_file->private_data;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002770 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002771 int ret = VM_FAULT_SIGBUS;
2772
2773 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2774 if (vmf->pgoff == 0)
2775 ret = 0;
2776 return ret;
2777 }
2778
2779 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002780 buffer = rcu_dereference(event->buffer);
2781 if (!buffer)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002782 goto unlock;
2783
2784 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2785 goto unlock;
2786
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002787 vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002788 if (!vmf->page)
2789 goto unlock;
2790
2791 get_page(vmf->page);
2792 vmf->page->mapping = vma->vm_file->f_mapping;
2793 vmf->page->index = vmf->pgoff;
2794
2795 ret = 0;
2796unlock:
2797 rcu_read_unlock();
2798
2799 return ret;
2800}
2801
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002802static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
Peter Zijlstra906010b2009-09-21 16:08:49 +02002803{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002804 struct perf_buffer *buffer;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002805
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002806 buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
2807 perf_buffer_free(buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002808}
2809
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002810static struct perf_buffer *perf_buffer_get(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002811{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002812 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002813
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002814 rcu_read_lock();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002815 buffer = rcu_dereference(event->buffer);
2816 if (buffer) {
2817 if (!atomic_inc_not_zero(&buffer->refcount))
2818 buffer = NULL;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002819 }
2820 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002821
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002822 return buffer;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002823}
2824
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002825static void perf_buffer_put(struct perf_buffer *buffer)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002826{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002827 if (!atomic_dec_and_test(&buffer->refcount))
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002828 return;
2829
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002830 call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002831}
2832
2833static void perf_mmap_open(struct vm_area_struct *vma)
2834{
2835 struct perf_event *event = vma->vm_file->private_data;
2836
2837 atomic_inc(&event->mmap_count);
2838}
2839
2840static void perf_mmap_close(struct vm_area_struct *vma)
2841{
2842 struct perf_event *event = vma->vm_file->private_data;
2843
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002844 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002845 unsigned long size = perf_data_size(event->buffer);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002846 struct user_struct *user = event->mmap_user;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002847 struct perf_buffer *buffer = event->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002848
Peter Zijlstra906010b2009-09-21 16:08:49 +02002849 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002850 vma->vm_mm->locked_vm -= event->mmap_locked;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002851 rcu_assign_pointer(event->buffer, NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002852 mutex_unlock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002853
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002854 perf_buffer_put(buffer);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002855 free_uid(user);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002856 }
2857}
2858
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04002859static const struct vm_operations_struct perf_mmap_vmops = {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002860 .open = perf_mmap_open,
2861 .close = perf_mmap_close,
2862 .fault = perf_mmap_fault,
2863 .page_mkwrite = perf_mmap_fault,
2864};
2865
2866static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2867{
2868 struct perf_event *event = file->private_data;
2869 unsigned long user_locked, user_lock_limit;
2870 struct user_struct *user = current_user();
2871 unsigned long locked, lock_limit;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002872 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002873 unsigned long vma_size;
2874 unsigned long nr_pages;
2875 long user_extra, extra;
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002876 int ret = 0, flags = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002877
Peter Zijlstrac7920612010-05-18 10:33:24 +02002878 /*
2879 * Don't allow mmap() of inherited per-task counters. This would
2880 * create a performance issue due to all children writing to the
2881 * same buffer.
2882 */
2883 if (event->cpu == -1 && event->attr.inherit)
2884 return -EINVAL;
2885
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002886 if (!(vma->vm_flags & VM_SHARED))
2887 return -EINVAL;
2888
2889 vma_size = vma->vm_end - vma->vm_start;
2890 nr_pages = (vma_size / PAGE_SIZE) - 1;
2891
2892 /*
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002893 * If we have buffer pages, ensure their count is a power of two, so we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002894 * can do bitmasks instead of modulo.
2895 */
2896 if (nr_pages != 0 && !is_power_of_2(nr_pages))
2897 return -EINVAL;
2898
2899 if (vma_size != PAGE_SIZE * (1 + nr_pages))
2900 return -EINVAL;
2901
2902 if (vma->vm_pgoff != 0)
2903 return -EINVAL;
2904
2905 WARN_ON_ONCE(event->ctx->parent_ctx);
2906 mutex_lock(&event->mmap_mutex);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002907 if (event->buffer) {
2908 if (event->buffer->nr_pages == nr_pages)
2909 atomic_inc(&event->buffer->refcount);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002910 else
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002911 ret = -EINVAL;
2912 goto unlock;
2913 }
2914
2915 user_extra = nr_pages + 1;
2916 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2917
2918 /*
2919 * Increase the limit linearly with more CPUs:
2920 */
2921 user_lock_limit *= num_online_cpus();
2922
2923 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2924
2925 extra = 0;
2926 if (user_locked > user_lock_limit)
2927 extra = user_locked - user_lock_limit;
2928
Jiri Slaby78d7d402010-03-05 13:42:54 -08002929 lock_limit = rlimit(RLIMIT_MEMLOCK);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002930 lock_limit >>= PAGE_SHIFT;
2931 locked = vma->vm_mm->locked_vm + extra;
2932
2933 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2934 !capable(CAP_IPC_LOCK)) {
2935 ret = -EPERM;
2936 goto unlock;
2937 }
2938
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002939 WARN_ON(event->buffer);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002940
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002941 if (vma->vm_flags & VM_WRITE)
2942 flags |= PERF_BUFFER_WRITABLE;
2943
2944 buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
2945 event->cpu, flags);
Peter Zijlstraca5135e2010-05-28 19:33:23 +02002946 if (!buffer) {
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002947 ret = -ENOMEM;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002948 goto unlock;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002949 }
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02002950 rcu_assign_pointer(event->buffer, buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002951
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002952 atomic_long_add(user_extra, &user->locked_vm);
2953 event->mmap_locked = extra;
2954 event->mmap_user = get_current_user();
2955 vma->vm_mm->locked_vm += event->mmap_locked;
2956
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002957unlock:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002958 if (!ret)
2959 atomic_inc(&event->mmap_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002960 mutex_unlock(&event->mmap_mutex);
2961
2962 vma->vm_flags |= VM_RESERVED;
2963 vma->vm_ops = &perf_mmap_vmops;
2964
2965 return ret;
2966}
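
/*
 * User-space usage sketch (illustrative only, error handling omitted;
 * 'attr' and 'page_size' are placeholders set up by the caller).  The
 * mapping must cover one control page plus a power-of-two number of
 * data pages, e.g. eight data pages:
 *
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	size_t len = (1 + 8) * page_size;
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * Anything else (a non power-of-two number of data pages, a non-zero
 * pgoff, or a mapping without MAP_SHARED) is rejected with -EINVAL by
 * the checks above.
 */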
2967
2968static int perf_fasync(int fd, struct file *filp, int on)
2969{
2970 struct inode *inode = filp->f_path.dentry->d_inode;
2971 struct perf_event *event = filp->private_data;
2972 int retval;
2973
2974 mutex_lock(&inode->i_mutex);
2975 retval = fasync_helper(fd, filp, on, &event->fasync);
2976 mutex_unlock(&inode->i_mutex);
2977
2978 if (retval < 0)
2979 return retval;
2980
2981 return 0;
2982}
2983
2984static const struct file_operations perf_fops = {
Arnd Bergmann3326c1c2010-03-23 19:09:33 +01002985 .llseek = no_llseek,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002986 .release = perf_release,
2987 .read = perf_read,
2988 .poll = perf_poll,
2989 .unlocked_ioctl = perf_ioctl,
2990 .compat_ioctl = perf_ioctl,
2991 .mmap = perf_mmap,
2992 .fasync = perf_fasync,
2993};
2994
2995/*
2996 * Perf event wakeup
2997 *
2998 * If there's data, ensure we set the poll() state and publish everything
2999 * to user-space before waking everybody up.
3000 */
3001
3002void perf_event_wakeup(struct perf_event *event)
3003{
3004 wake_up_all(&event->waitq);
3005
3006 if (event->pending_kill) {
3007 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3008 event->pending_kill = 0;
3009 }
3010}
3011
3012/*
3013 * Pending wakeups
3014 *
3015 * Handle the case where we need to wake up from NMI (or rq->lock) context.
3016 *
3017 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
3018 * singly linked list and use cmpxchg() to add entries locklessly.
3019 */
3020
3021static void perf_pending_event(struct perf_pending_entry *entry)
3022{
3023 struct perf_event *event = container_of(entry,
3024 struct perf_event, pending);
3025
3026 if (event->pending_disable) {
3027 event->pending_disable = 0;
3028 __perf_event_disable(event);
3029 }
3030
3031 if (event->pending_wakeup) {
3032 event->pending_wakeup = 0;
3033 perf_event_wakeup(event);
3034 }
3035}
3036
3037#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
3038
3039static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
3040 PENDING_TAIL,
3041};
3042
3043static void perf_pending_queue(struct perf_pending_entry *entry,
3044 void (*func)(struct perf_pending_entry *))
3045{
3046 struct perf_pending_entry **head;
3047
3048 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
3049 return;
3050
3051 entry->func = func;
3052
3053 head = &get_cpu_var(perf_pending_head);
3054
3055 do {
3056 entry->next = *head;
3057 } while (cmpxchg(head, entry->next, entry) != entry->next);
3058
3059 set_perf_event_pending();
3060
3061 put_cpu_var(perf_pending_head);
3062}
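
/*
 * The cmpxchg() dance above is a lock-free push onto a per-CPU singly
 * linked list: the PENDING_TAIL sentinel stored into ->next marks an
 * entry as queued, so re-queueing the same entry from a nested NMI is
 * a no-op.  The list is drained in __perf_pending_run() below by
 * xchg()ing the whole head in one go.
 */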
3063
3064static int __perf_pending_run(void)
3065{
3066 struct perf_pending_entry *list;
3067 int nr = 0;
3068
3069 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
3070 while (list != PENDING_TAIL) {
3071 void (*func)(struct perf_pending_entry *);
3072 struct perf_pending_entry *entry = list;
3073
3074 list = list->next;
3075
3076 func = entry->func;
3077 entry->next = NULL;
3078 /*
3079 * Ensure we observe the unqueue before we issue the wakeup,
3080 * so that we won't be waiting forever.
3081 * -- see perf_not_pending().
3082 */
3083 smp_wmb();
3084
3085 func(entry);
3086 nr++;
3087 }
3088
3089 return nr;
3090}
3091
3092static inline int perf_not_pending(struct perf_event *event)
3093{
3094 /*
3095 * If we flush on whatever cpu we happen to run on, there is a
3096 * chance we don't need to wait.
3097 */
3098 get_cpu();
3099 __perf_pending_run();
3100 put_cpu();
3101
3102 /*
3103 * Ensure we see the proper queue state before going to sleep
3104 * so that we do not miss the wakeup. -- see perf_pending_handle()
3105 */
3106 smp_rmb();
3107 return event->pending.next == NULL;
3108}
3109
3110static void perf_pending_sync(struct perf_event *event)
3111{
3112 wait_event(event->waitq, perf_not_pending(event));
3113}
3114
3115void perf_event_do_pending(void)
3116{
3117 __perf_pending_run();
3118}
3119
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003120/*
Zhang, Yanmin39447b32010-04-19 13:32:41 +08003121 * We assume KVM is the only hypervisor supporting these callbacks.
3122 * Later on, we might change this to a list if another
3123 * virtualization implementation also needs the callbacks.
3124 */
3125struct perf_guest_info_callbacks *perf_guest_cbs;
3126
3127int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3128{
3129 perf_guest_cbs = cbs;
3130 return 0;
3131}
3132EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3133
3134int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3135{
3136 perf_guest_cbs = NULL;
3137 return 0;
3138}
3139EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
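
/*
 * Illustrative sketch (not part of this file): how a hypervisor module
 * would wire up the guest-info callbacks registered above.  This assumes
 * the callback structure carries the is_in_guest()/is_user_mode()/
 * get_guest_ip() hooks that KVM provides; all example_* names are made up
 * and the stub implementations simply report "not in a guest".
 */
static int example_is_in_guest(void)
{
	return 0;
}

static int example_is_user_mode(void)
{
	return 0;
}

static unsigned long example_get_guest_ip(void)
{
	return 0;
}

static struct perf_guest_info_callbacks example_guest_cbs = {
	.is_in_guest	= example_is_in_guest,
	.is_user_mode	= example_is_user_mode,
	.get_guest_ip	= example_get_guest_ip,
};

static int example_guest_init(void)
{
	return perf_register_guest_info_callbacks(&example_guest_cbs);
}

static void example_guest_exit(void)
{
	perf_unregister_guest_info_callbacks(&example_guest_cbs);
}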
3140
3141/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003142 * Output
3143 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003144static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003145 unsigned long offset, unsigned long head)
3146{
3147 unsigned long mask;
3148
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003149 if (!buffer->writable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003150 return true;
3151
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003152 mask = perf_data_size(buffer) - 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003153
3154 offset = (offset - tail) & mask;
3155 head = (head - tail) & mask;
3156
3157 if ((int)(head - offset) < 0)
3158 return false;
3159
3160 return true;
3161}
3162
3163static void perf_output_wakeup(struct perf_output_handle *handle)
3164{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003165 atomic_set(&handle->buffer->poll, POLL_IN);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003166
3167 if (handle->nmi) {
3168 handle->event->pending_wakeup = 1;
3169 perf_pending_queue(&handle->event->pending,
3170 perf_pending_event);
3171 } else
3172 perf_event_wakeup(handle->event);
3173}
3174
3175/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003176 * We need to ensure that a later event doesn't publish a head while a former
Peter Zijlstraef607772010-05-18 10:50:41 +02003177 * event is still writing. However, since we need to deal with NMIs, we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003178 * cannot fully serialize things.
3179 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003180 * We only publish the head (and generate a wakeup) when the outer-most
Peter Zijlstraef607772010-05-18 10:50:41 +02003181 * event completes.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003182 */
Peter Zijlstraef607772010-05-18 10:50:41 +02003183static void perf_output_get_handle(struct perf_output_handle *handle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003184{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003185 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003186
Peter Zijlstraef607772010-05-18 10:50:41 +02003187 preempt_disable();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003188 local_inc(&buffer->nest);
3189 handle->wakeup = local_read(&buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003190}
3191
Peter Zijlstraef607772010-05-18 10:50:41 +02003192static void perf_output_put_handle(struct perf_output_handle *handle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003193{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003194 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003195 unsigned long head;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003196
3197again:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003198 head = local_read(&buffer->head);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003199
3200 /*
Peter Zijlstraef607772010-05-18 10:50:41 +02003201 * IRQ/NMI can happen here, which means we can miss a head update.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003202 */
3203
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003204 if (!local_dec_and_test(&buffer->nest))
Frederic Weisbeckeracd35a42010-05-20 21:28:34 +02003205 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003206
3207 /*
Peter Zijlstraef607772010-05-18 10:50:41 +02003208 * Publish the known good head. Rely on the full barrier implied
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003209 * by local_dec_and_test() to order the buffer->head read and this
Peter Zijlstraef607772010-05-18 10:50:41 +02003210 * write.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003211 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003212 buffer->user_page->data_head = head;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003213
Peter Zijlstraef607772010-05-18 10:50:41 +02003214 /*
3215 * Now check if we missed an update; rely on the (compiler)
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003216 * barrier in local_dec_and_test() to re-read buffer->head.
Peter Zijlstraef607772010-05-18 10:50:41 +02003217 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003218 if (unlikely(head != local_read(&buffer->head))) {
3219 local_inc(&buffer->nest);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003220 goto again;
3221 }
3222
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003223 if (handle->wakeup != local_read(&buffer->wakeup))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003224 perf_output_wakeup(handle);
Peter Zijlstraef607772010-05-18 10:50:41 +02003225
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003226out:
Peter Zijlstraef607772010-05-18 10:50:41 +02003227 preempt_enable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003228}
3229
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003230__always_inline void perf_output_copy(struct perf_output_handle *handle,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003231 const void *buf, unsigned int len)
3232{
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003233 do {
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003234 unsigned long size = min_t(unsigned long, handle->size, len);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003235
3236 memcpy(handle->addr, buf, size);
3237
3238 len -= size;
3239 handle->addr += size;
Frederic Weisbecker74048f82010-05-27 21:34:58 +02003240 buf += size;
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003241 handle->size -= size;
3242 if (!handle->size) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003243 struct perf_buffer *buffer = handle->buffer;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003244
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003245 handle->page++;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003246 handle->page &= buffer->nr_pages - 1;
3247 handle->addr = buffer->data_pages[handle->page];
3248 handle->size = PAGE_SIZE << page_order(buffer);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003249 }
3250 } while (len);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003251}
3252
3253int perf_output_begin(struct perf_output_handle *handle,
3254 struct perf_event *event, unsigned int size,
3255 int nmi, int sample)
3256{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003257 struct perf_buffer *buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003258 unsigned long tail, offset, head;
3259 int have_lost;
3260 struct {
3261 struct perf_event_header header;
3262 u64 id;
3263 u64 lost;
3264 } lost_event;
3265
3266 rcu_read_lock();
3267 /*
3268 * For inherited events we send all the output towards the parent.
3269 */
3270 if (event->parent)
3271 event = event->parent;
3272
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003273 buffer = rcu_dereference(event->buffer);
3274 if (!buffer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003275 goto out;
3276
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003277 handle->buffer = buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003278 handle->event = event;
3279 handle->nmi = nmi;
3280 handle->sample = sample;
3281
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003282 if (!buffer->nr_pages)
Stephane Eranian00d1d0b2010-05-17 12:46:01 +02003283 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003284
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003285 have_lost = local_read(&buffer->lost);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003286 if (have_lost)
3287 size += sizeof(lost_event);
3288
Peter Zijlstraef607772010-05-18 10:50:41 +02003289 perf_output_get_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003290
3291 do {
3292 /*
3293 * Userspace could choose to issue an mb() before updating the
3294 * tail pointer, so that all reads are completed before the write
3295 * is issued; see the illustrative reader sketch after this function.
3296 */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003297 tail = ACCESS_ONCE(buffer->user_page->data_tail);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003298 smp_rmb();
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003299 offset = head = local_read(&buffer->head);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003300 head += size;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003301 if (unlikely(!perf_output_space(buffer, tail, offset, head)))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003302 goto fail;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003303 } while (local_cmpxchg(&buffer->head, offset, head) != offset);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003304
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003305 if (head - local_read(&buffer->wakeup) > buffer->watermark)
3306 local_add(buffer->watermark, &buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003307
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003308 handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
3309 handle->page &= buffer->nr_pages - 1;
3310 handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
3311 handle->addr = buffer->data_pages[handle->page];
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003312 handle->addr += handle->size;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003313 handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003314
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003315 if (have_lost) {
3316 lost_event.header.type = PERF_RECORD_LOST;
3317 lost_event.header.misc = 0;
3318 lost_event.header.size = sizeof(lost_event);
3319 lost_event.id = event->id;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003320 lost_event.lost = local_xchg(&buffer->lost, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003321
3322 perf_output_put(handle, lost_event);
3323 }
3324
3325 return 0;
3326
3327fail:
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003328 local_inc(&buffer->lost);
Peter Zijlstraef607772010-05-18 10:50:41 +02003329 perf_output_put_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003330out:
3331 rcu_read_unlock();
3332
3333 return -ENOSPC;
3334}
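
/*
 * Illustrative user-space reader sketch (not part of this file): the
 * counterpart of the data_tail comment in perf_output_begin() above.  The
 * reader orders its data loads after the data_head load and issues a full
 * barrier before publishing the new data_tail, so the kernel never
 * overwrites records that are still being read.  The example_* names and
 * process_record() are made up; records that wrap around the end of the
 * buffer are not handled, the data area size must be a power of two, and
 * GCC's __atomic builtins stand in for the barriers.
 */
#include <linux/perf_event.h>
#include <stdint.h>

#define example_rmb()	__atomic_thread_fence(__ATOMIC_ACQUIRE)
#define example_mb()	__atomic_thread_fence(__ATOMIC_SEQ_CST)

/* Hypothetical consumer of one complete event record. */
void process_record(const struct perf_event_header *hdr);

void example_read_ring(struct perf_event_mmap_page *pg,
		       char *data, uint64_t data_size)
{
	uint64_t head = pg->data_head;	/* written by the kernel */
	uint64_t tail = pg->data_tail;	/* owned by the reader */

	example_rmb();		/* order the data loads after the head load */

	while (tail < head) {
		const struct perf_event_header *hdr;

		hdr = (const struct perf_event_header *)
			(data + (tail & (data_size - 1)));
		process_record(hdr);
		tail += hdr->size;
	}

	example_mb();		/* finish all reads before releasing the space */
	pg->data_tail = tail;
}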
3335
3336void perf_output_end(struct perf_output_handle *handle)
3337{
3338 struct perf_event *event = handle->event;
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003339 struct perf_buffer *buffer = handle->buffer;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003340
3341 int wakeup_events = event->attr.wakeup_events;
3342
3343 if (handle->sample && wakeup_events) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003344 int events = local_inc_return(&buffer->events);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003345 if (events >= wakeup_events) {
Peter Zijlstraca5135e2010-05-28 19:33:23 +02003346 local_sub(wakeup_events, &buffer->events);
3347 local_inc(&buffer->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003348 }
3349 }
3350
Peter Zijlstraef607772010-05-18 10:50:41 +02003351 perf_output_put_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003352 rcu_read_unlock();
3353}
3354
3355static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
3356{
3357 /*
3358 * only top level events have the pid namespace they were created in
3359 */
3360 if (event->parent)
3361 event = event->parent;
3362
3363 return task_tgid_nr_ns(p, event->ns);
3364}
3365
3366static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3367{
3368 /*
3369 * only top level events have the pid namespace they were created in
3370 */
3371 if (event->parent)
3372 event = event->parent;
3373
3374 return task_pid_nr_ns(p, event->ns);
3375}
3376
3377static void perf_output_read_one(struct perf_output_handle *handle,
3378 struct perf_event *event)
3379{
3380 u64 read_format = event->attr.read_format;
3381 u64 values[4];
3382 int n = 0;
3383
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003384 values[n++] = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003385 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3386 values[n++] = event->total_time_enabled +
3387 atomic64_read(&event->child_total_time_enabled);
3388 }
3389 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3390 values[n++] = event->total_time_running +
3391 atomic64_read(&event->child_total_time_running);
3392 }
3393 if (read_format & PERF_FORMAT_ID)
3394 values[n++] = primary_event_id(event);
3395
3396 perf_output_copy(handle, values, n * sizeof(u64));
3397}
3398
3399/*
3400 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3401 */
3402static void perf_output_read_group(struct perf_output_handle *handle,
3403 struct perf_event *event)
3404{
3405 struct perf_event *leader = event->group_leader, *sub;
3406 u64 read_format = event->attr.read_format;
3407 u64 values[5];
3408 int n = 0;
3409
3410 values[n++] = 1 + leader->nr_siblings;
3411
3412 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3413 values[n++] = leader->total_time_enabled;
3414
3415 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3416 values[n++] = leader->total_time_running;
3417
3418 if (leader != event)
3419 leader->pmu->read(leader);
3420
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003421 values[n++] = perf_event_count(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003422 if (read_format & PERF_FORMAT_ID)
3423 values[n++] = primary_event_id(leader);
3424
3425 perf_output_copy(handle, values, n * sizeof(u64));
3426
3427 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3428 n = 0;
3429
3430 if (sub != event)
3431 sub->pmu->read(sub);
3432
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003433 values[n++] = perf_event_count(sub);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003434 if (read_format & PERF_FORMAT_ID)
3435 values[n++] = primary_event_id(sub);
3436
3437 perf_output_copy(handle, values, n * sizeof(u64));
3438 }
3439}
3440
3441static void perf_output_read(struct perf_output_handle *handle,
3442 struct perf_event *event)
3443{
3444 if (event->attr.read_format & PERF_FORMAT_GROUP)
3445 perf_output_read_group(handle, event);
3446 else
3447 perf_output_read_one(handle, event);
3448}
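
/*
 * Illustrative user-space sketch (not part of this file): decoding the
 * value layout emitted by perf_output_read_one() above, which is also the
 * layout returned by read() on a non-group event fd for the same
 * read_format bits.  example_* names are made up, and only the
 * non-PERF_FORMAT_GROUP case is handled.
 */
#include <linux/perf_event.h>
#include <stdint.h>

struct example_read_value {
	uint64_t value;
	uint64_t time_enabled;	/* valid if PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* valid if PERF_FORMAT_TOTAL_TIME_RUNNING */
	uint64_t id;		/* valid if PERF_FORMAT_ID */
};

static void example_decode_read_one(const uint64_t *buf, uint64_t read_format,
				    struct example_read_value *out)
{
	int n = 0;

	out->value = buf[n++];
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		out->time_enabled = buf[n++];
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		out->time_running = buf[n++];
	if (read_format & PERF_FORMAT_ID)
		out->id = buf[n++];
}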
3449
3450void perf_output_sample(struct perf_output_handle *handle,
3451 struct perf_event_header *header,
3452 struct perf_sample_data *data,
3453 struct perf_event *event)
3454{
3455 u64 sample_type = data->type;
3456
3457 perf_output_put(handle, *header);
3458
3459 if (sample_type & PERF_SAMPLE_IP)
3460 perf_output_put(handle, data->ip);
3461
3462 if (sample_type & PERF_SAMPLE_TID)
3463 perf_output_put(handle, data->tid_entry);
3464
3465 if (sample_type & PERF_SAMPLE_TIME)
3466 perf_output_put(handle, data->time);
3467
3468 if (sample_type & PERF_SAMPLE_ADDR)
3469 perf_output_put(handle, data->addr);
3470
3471 if (sample_type & PERF_SAMPLE_ID)
3472 perf_output_put(handle, data->id);
3473
3474 if (sample_type & PERF_SAMPLE_STREAM_ID)
3475 perf_output_put(handle, data->stream_id);
3476
3477 if (sample_type & PERF_SAMPLE_CPU)
3478 perf_output_put(handle, data->cpu_entry);
3479
3480 if (sample_type & PERF_SAMPLE_PERIOD)
3481 perf_output_put(handle, data->period);
3482
3483 if (sample_type & PERF_SAMPLE_READ)
3484 perf_output_read(handle, event);
3485
3486 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3487 if (data->callchain) {
3488 int size = 1;
3489
3490 size += data->callchain->nr;
3492
3493 size *= sizeof(u64);
3494
3495 perf_output_copy(handle, data->callchain, size);
3496 } else {
3497 u64 nr = 0;
3498 perf_output_put(handle, nr);
3499 }
3500 }
3501
3502 if (sample_type & PERF_SAMPLE_RAW) {
3503 if (data->raw) {
3504 perf_output_put(handle, data->raw->size);
3505 perf_output_copy(handle, data->raw->data,
3506 data->raw->size);
3507 } else {
3508 struct {
3509 u32 size;
3510 u32 data;
3511 } raw = {
3512 .size = sizeof(u32),
3513 .data = 0,
3514 };
3515 perf_output_put(handle, raw);
3516 }
3517 }
3518}
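
/*
 * Illustrative user-space sketch (not part of this file): decoding a
 * PERF_RECORD_SAMPLE whose sample_type was fixed at
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD.
 * The field order follows perf_output_sample() above; the struct and
 * function names are made up for the example and no other sample_type
 * bits are supported.
 */
#include <linux/perf_event.h>
#include <stdint.h>

struct example_sample {
	struct perf_event_header header;	/* header.type == PERF_RECORD_SAMPLE */
	uint64_t	ip;			/* PERF_SAMPLE_IP */
	uint32_t	pid, tid;		/* PERF_SAMPLE_TID */
	uint64_t	time;			/* PERF_SAMPLE_TIME */
	uint64_t	period;			/* PERF_SAMPLE_PERIOD */
};

static int example_parse_sample(const struct perf_event_header *hdr,
				struct example_sample *out)
{
	if (hdr->type != PERF_RECORD_SAMPLE ||
	    hdr->size < sizeof(struct example_sample))
		return -1;

	*out = *(const struct example_sample *)hdr;
	return 0;
}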
3519
3520void perf_prepare_sample(struct perf_event_header *header,
3521 struct perf_sample_data *data,
3522 struct perf_event *event,
3523 struct pt_regs *regs)
3524{
3525 u64 sample_type = event->attr.sample_type;
3526
3527 data->type = sample_type;
3528
3529 header->type = PERF_RECORD_SAMPLE;
3530 header->size = sizeof(*header);
3531
3532 header->misc = 0;
3533 header->misc |= perf_misc_flags(regs);
3534
3535 if (sample_type & PERF_SAMPLE_IP) {
3536 data->ip = perf_instruction_pointer(regs);
3537
3538 header->size += sizeof(data->ip);
3539 }
3540
3541 if (sample_type & PERF_SAMPLE_TID) {
3542 /* namespace issues */
3543 data->tid_entry.pid = perf_event_pid(event, current);
3544 data->tid_entry.tid = perf_event_tid(event, current);
3545
3546 header->size += sizeof(data->tid_entry);
3547 }
3548
3549 if (sample_type & PERF_SAMPLE_TIME) {
3550 data->time = perf_clock();
3551
3552 header->size += sizeof(data->time);
3553 }
3554
3555 if (sample_type & PERF_SAMPLE_ADDR)
3556 header->size += sizeof(data->addr);
3557
3558 if (sample_type & PERF_SAMPLE_ID) {
3559 data->id = primary_event_id(event);
3560
3561 header->size += sizeof(data->id);
3562 }
3563
3564 if (sample_type & PERF_SAMPLE_STREAM_ID) {
3565 data->stream_id = event->id;
3566
3567 header->size += sizeof(data->stream_id);
3568 }
3569
3570 if (sample_type & PERF_SAMPLE_CPU) {
3571 data->cpu_entry.cpu = raw_smp_processor_id();
3572 data->cpu_entry.reserved = 0;
3573
3574 header->size += sizeof(data->cpu_entry);
3575 }
3576
3577 if (sample_type & PERF_SAMPLE_PERIOD)
3578 header->size += sizeof(data->period);
3579
3580 if (sample_type & PERF_SAMPLE_READ)
3581 header->size += perf_event_read_size(event);
3582
3583 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3584 int size = 1;
3585
3586 data->callchain = perf_callchain(regs);
3587
3588 if (data->callchain)
3589 size += data->callchain->nr;
3590
3591 header->size += size * sizeof(u64);
3592 }
3593
3594 if (sample_type & PERF_SAMPLE_RAW) {
3595 int size = sizeof(u32);
3596
3597 if (data->raw)
3598 size += data->raw->size;
3599 else
3600 size += sizeof(u32);
3601
3602 WARN_ON_ONCE(size & (sizeof(u64)-1));
3603 header->size += size;
3604 }
3605}
3606
3607static void perf_event_output(struct perf_event *event, int nmi,
3608 struct perf_sample_data *data,
3609 struct pt_regs *regs)
3610{
3611 struct perf_output_handle handle;
3612 struct perf_event_header header;
3613
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003614 /* protect the callchain buffers */
3615 rcu_read_lock();
3616
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003617 perf_prepare_sample(&header, data, event, regs);
3618
3619 if (perf_output_begin(&handle, event, header.size, nmi, 1))
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003620 goto exit;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003621
3622 perf_output_sample(&handle, &header, data, event);
3623
3624 perf_output_end(&handle);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003625
3626exit:
3627 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003628}
3629
3630/*
3631 * read event_id
3632 */
3633
3634struct perf_read_event {
3635 struct perf_event_header header;
3636
3637 u32 pid;
3638 u32 tid;
3639};
3640
3641static void
3642perf_event_read_event(struct perf_event *event,
3643 struct task_struct *task)
3644{
3645 struct perf_output_handle handle;
3646 struct perf_read_event read_event = {
3647 .header = {
3648 .type = PERF_RECORD_READ,
3649 .misc = 0,
3650 .size = sizeof(read_event) + perf_event_read_size(event),
3651 },
3652 .pid = perf_event_pid(event, task),
3653 .tid = perf_event_tid(event, task),
3654 };
3655 int ret;
3656
3657 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3658 if (ret)
3659 return;
3660
3661 perf_output_put(&handle, read_event);
3662 perf_output_read(&handle, event);
3663
3664 perf_output_end(&handle);
3665}
3666
3667/*
3668 * task tracking -- fork/exit
3669 *
Eric B Munson3af9e852010-05-18 15:30:49 +01003670 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003671 */
3672
3673struct perf_task_event {
3674 struct task_struct *task;
3675 struct perf_event_context *task_ctx;
3676
3677 struct {
3678 struct perf_event_header header;
3679
3680 u32 pid;
3681 u32 ppid;
3682 u32 tid;
3683 u32 ptid;
3684 u64 time;
3685 } event_id;
3686};
3687
3688static void perf_event_task_output(struct perf_event *event,
3689 struct perf_task_event *task_event)
3690{
3691 struct perf_output_handle handle;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003692 struct task_struct *task = task_event->task;
Mike Galbraith8bb39f92010-03-26 11:11:33 +01003693 int size, ret;
3694
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003695 size = task_event->event_id.header.size;
3696 ret = perf_output_begin(&handle, event, size, 0, 0);
3697
Peter Zijlstraef607772010-05-18 10:50:41 +02003698 if (ret)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003699 return;
3700
3701 task_event->event_id.pid = perf_event_pid(event, task);
3702 task_event->event_id.ppid = perf_event_pid(event, current);
3703
3704 task_event->event_id.tid = perf_event_tid(event, task);
3705 task_event->event_id.ptid = perf_event_tid(event, current);
3706
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003707 perf_output_put(&handle, task_event->event_id);
3708
3709 perf_output_end(&handle);
3710}
3711
3712static int perf_event_task_match(struct perf_event *event)
3713{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003714 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01003715 return 0;
3716
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003717 if (event->cpu != -1 && event->cpu != smp_processor_id())
3718 return 0;
3719
Eric B Munson3af9e852010-05-18 15:30:49 +01003720 if (event->attr.comm || event->attr.mmap ||
3721 event->attr.mmap_data || event->attr.task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003722 return 1;
3723
3724 return 0;
3725}
3726
3727static void perf_event_task_ctx(struct perf_event_context *ctx,
3728 struct perf_task_event *task_event)
3729{
3730 struct perf_event *event;
3731
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003732 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3733 if (perf_event_task_match(event))
3734 perf_event_task_output(event, task_event);
3735 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003736}
3737
3738static void perf_event_task_event(struct perf_task_event *task_event)
3739{
3740 struct perf_cpu_context *cpuctx;
3741 struct perf_event_context *ctx = task_event->task_ctx;
3742
Peter Zijlstrad6ff86c2009-11-20 22:19:46 +01003743 rcu_read_lock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003744 cpuctx = &get_cpu_var(perf_cpu_context);
3745 perf_event_task_ctx(&cpuctx->ctx, task_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003746 if (!ctx)
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003747 ctx = rcu_dereference(current->perf_event_ctxp);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003748 if (ctx)
3749 perf_event_task_ctx(ctx, task_event);
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003750 put_cpu_var(perf_cpu_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003751 rcu_read_unlock();
3752}
3753
3754static void perf_event_task(struct task_struct *task,
3755 struct perf_event_context *task_ctx,
3756 int new)
3757{
3758 struct perf_task_event task_event;
3759
3760 if (!atomic_read(&nr_comm_events) &&
3761 !atomic_read(&nr_mmap_events) &&
3762 !atomic_read(&nr_task_events))
3763 return;
3764
3765 task_event = (struct perf_task_event){
3766 .task = task,
3767 .task_ctx = task_ctx,
3768 .event_id = {
3769 .header = {
3770 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3771 .misc = 0,
3772 .size = sizeof(task_event.event_id),
3773 },
3774 /* .pid */
3775 /* .ppid */
3776 /* .tid */
3777 /* .ptid */
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003778 .time = perf_clock(),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003779 },
3780 };
3781
3782 perf_event_task_event(&task_event);
3783}
3784
3785void perf_event_fork(struct task_struct *task)
3786{
3787 perf_event_task(task, NULL, 1);
3788}
3789
3790/*
3791 * comm tracking
3792 */
3793
3794struct perf_comm_event {
3795 struct task_struct *task;
3796 char *comm;
3797 int comm_size;
3798
3799 struct {
3800 struct perf_event_header header;
3801
3802 u32 pid;
3803 u32 tid;
3804 } event_id;
3805};
3806
3807static void perf_event_comm_output(struct perf_event *event,
3808 struct perf_comm_event *comm_event)
3809{
3810 struct perf_output_handle handle;
3811 int size = comm_event->event_id.header.size;
3812 int ret = perf_output_begin(&handle, event, size, 0, 0);
3813
3814 if (ret)
3815 return;
3816
3817 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3818 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3819
3820 perf_output_put(&handle, comm_event->event_id);
3821 perf_output_copy(&handle, comm_event->comm,
3822 comm_event->comm_size);
3823 perf_output_end(&handle);
3824}
3825
3826static int perf_event_comm_match(struct perf_event *event)
3827{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003828 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01003829 return 0;
3830
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003831 if (event->cpu != -1 && event->cpu != smp_processor_id())
3832 return 0;
3833
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003834 if (event->attr.comm)
3835 return 1;
3836
3837 return 0;
3838}
3839
3840static void perf_event_comm_ctx(struct perf_event_context *ctx,
3841 struct perf_comm_event *comm_event)
3842{
3843 struct perf_event *event;
3844
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003845 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3846 if (perf_event_comm_match(event))
3847 perf_event_comm_output(event, comm_event);
3848 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003849}
3850
3851static void perf_event_comm_event(struct perf_comm_event *comm_event)
3852{
3853 struct perf_cpu_context *cpuctx;
3854 struct perf_event_context *ctx;
3855 unsigned int size;
3856 char comm[TASK_COMM_LEN];
3857
3858 memset(comm, 0, sizeof(comm));
Márton Németh96b02d72009-11-21 23:10:15 +01003859 strlcpy(comm, comm_event->task->comm, sizeof(comm));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003860 size = ALIGN(strlen(comm)+1, sizeof(u64));
3861
3862 comm_event->comm = comm;
3863 comm_event->comm_size = size;
3864
3865 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3866
Peter Zijlstraf6595f32009-11-20 22:19:47 +01003867 rcu_read_lock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003868 cpuctx = &get_cpu_var(perf_cpu_context);
3869 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003870 ctx = rcu_dereference(current->perf_event_ctxp);
3871 if (ctx)
3872 perf_event_comm_ctx(ctx, comm_event);
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003873 put_cpu_var(perf_cpu_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003874 rcu_read_unlock();
3875}
3876
3877void perf_event_comm(struct task_struct *task)
3878{
3879 struct perf_comm_event comm_event;
3880
3881 if (task->perf_event_ctxp)
3882 perf_event_enable_on_exec(task);
3883
3884 if (!atomic_read(&nr_comm_events))
3885 return;
3886
3887 comm_event = (struct perf_comm_event){
3888 .task = task,
3889 /* .comm */
3890 /* .comm_size */
3891 .event_id = {
3892 .header = {
3893 .type = PERF_RECORD_COMM,
3894 .misc = 0,
3895 /* .size */
3896 },
3897 /* .pid */
3898 /* .tid */
3899 },
3900 };
3901
3902 perf_event_comm_event(&comm_event);
3903}
3904
3905/*
3906 * mmap tracking
3907 */
3908
3909struct perf_mmap_event {
3910 struct vm_area_struct *vma;
3911
3912 const char *file_name;
3913 int file_size;
3914
3915 struct {
3916 struct perf_event_header header;
3917
3918 u32 pid;
3919 u32 tid;
3920 u64 start;
3921 u64 len;
3922 u64 pgoff;
3923 } event_id;
3924};
3925
3926static void perf_event_mmap_output(struct perf_event *event,
3927 struct perf_mmap_event *mmap_event)
3928{
3929 struct perf_output_handle handle;
3930 int size = mmap_event->event_id.header.size;
3931 int ret = perf_output_begin(&handle, event, size, 0, 0);
3932
3933 if (ret)
3934 return;
3935
3936 mmap_event->event_id.pid = perf_event_pid(event, current);
3937 mmap_event->event_id.tid = perf_event_tid(event, current);
3938
3939 perf_output_put(&handle, mmap_event->event_id);
3940 perf_output_copy(&handle, mmap_event->file_name,
3941 mmap_event->file_size);
3942 perf_output_end(&handle);
3943}
3944
3945static int perf_event_mmap_match(struct perf_event *event,
Eric B Munson3af9e852010-05-18 15:30:49 +01003946 struct perf_mmap_event *mmap_event,
3947 int executable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003948{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003949 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01003950 return 0;
3951
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003952 if (event->cpu != -1 && event->cpu != smp_processor_id())
3953 return 0;
3954
Eric B Munson3af9e852010-05-18 15:30:49 +01003955 if ((!executable && event->attr.mmap_data) ||
3956 (executable && event->attr.mmap))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003957 return 1;
3958
3959 return 0;
3960}
3961
3962static void perf_event_mmap_ctx(struct perf_event_context *ctx,
Eric B Munson3af9e852010-05-18 15:30:49 +01003963 struct perf_mmap_event *mmap_event,
3964 int executable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003965{
3966 struct perf_event *event;
3967
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003968 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Eric B Munson3af9e852010-05-18 15:30:49 +01003969 if (perf_event_mmap_match(event, mmap_event, executable))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003970 perf_event_mmap_output(event, mmap_event);
3971 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003972}
3973
3974static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3975{
3976 struct perf_cpu_context *cpuctx;
3977 struct perf_event_context *ctx;
3978 struct vm_area_struct *vma = mmap_event->vma;
3979 struct file *file = vma->vm_file;
3980 unsigned int size;
3981 char tmp[16];
3982 char *buf = NULL;
3983 const char *name;
3984
3985 memset(tmp, 0, sizeof(tmp));
3986
3987 if (file) {
3988 /*
3989 * d_path works from the end of the buffer backwards, so we
3990 * need to add enough zero bytes after the string to handle
3991 * the 64bit alignment we do later.
3992 */
3993 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3994 if (!buf) {
3995 name = strncpy(tmp, "//enomem", sizeof(tmp));
3996 goto got_name;
3997 }
3998 name = d_path(&file->f_path, buf, PATH_MAX);
3999 if (IS_ERR(name)) {
4000 name = strncpy(tmp, "//toolong", sizeof(tmp));
4001 goto got_name;
4002 }
4003 } else {
4004 if (arch_vma_name(mmap_event->vma)) {
4005 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4006 sizeof(tmp));
4007 goto got_name;
4008 }
4009
4010 if (!vma->vm_mm) {
4011 name = strncpy(tmp, "[vdso]", sizeof(tmp));
4012 goto got_name;
Eric B Munson3af9e852010-05-18 15:30:49 +01004013 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4014 vma->vm_end >= vma->vm_mm->brk) {
4015 name = strncpy(tmp, "[heap]", sizeof(tmp));
4016 goto got_name;
4017 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4018 vma->vm_end >= vma->vm_mm->start_stack) {
4019 name = strncpy(tmp, "[stack]", sizeof(tmp));
4020 goto got_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004021 }
4022
4023 name = strncpy(tmp, "//anon", sizeof(tmp));
4024 goto got_name;
4025 }
4026
4027got_name:
4028 size = ALIGN(strlen(name)+1, sizeof(u64));
4029
4030 mmap_event->file_name = name;
4031 mmap_event->file_size = size;
4032
4033 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4034
Peter Zijlstraf6d9dd22009-11-20 22:19:48 +01004035 rcu_read_lock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004036 cpuctx = &get_cpu_var(perf_cpu_context);
Eric B Munson3af9e852010-05-18 15:30:49 +01004037 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004038 ctx = rcu_dereference(current->perf_event_ctxp);
4039 if (ctx)
Eric B Munson3af9e852010-05-18 15:30:49 +01004040 perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
Peter Zijlstra5d27c232009-12-17 13:16:32 +01004041 put_cpu_var(perf_cpu_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004042 rcu_read_unlock();
4043
4044 kfree(buf);
4045}
4046
Eric B Munson3af9e852010-05-18 15:30:49 +01004047void perf_event_mmap(struct vm_area_struct *vma)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004048{
4049 struct perf_mmap_event mmap_event;
4050
4051 if (!atomic_read(&nr_mmap_events))
4052 return;
4053
4054 mmap_event = (struct perf_mmap_event){
4055 .vma = vma,
4056 /* .file_name */
4057 /* .file_size */
4058 .event_id = {
4059 .header = {
4060 .type = PERF_RECORD_MMAP,
Zhang, Yanmin39447b32010-04-19 13:32:41 +08004061 .misc = PERF_RECORD_MISC_USER,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004062 /* .size */
4063 },
4064 /* .pid */
4065 /* .tid */
4066 .start = vma->vm_start,
4067 .len = vma->vm_end - vma->vm_start,
Peter Zijlstra3a0304e2010-02-26 10:33:41 +01004068 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004069 },
4070 };
4071
4072 perf_event_mmap_event(&mmap_event);
4073}
4074
4075/*
4076 * IRQ throttle logging
4077 */
4078
4079static void perf_log_throttle(struct perf_event *event, int enable)
4080{
4081 struct perf_output_handle handle;
4082 int ret;
4083
4084 struct {
4085 struct perf_event_header header;
4086 u64 time;
4087 u64 id;
4088 u64 stream_id;
4089 } throttle_event = {
4090 .header = {
4091 .type = PERF_RECORD_THROTTLE,
4092 .misc = 0,
4093 .size = sizeof(throttle_event),
4094 },
4095 .time = perf_clock(),
4096 .id = primary_event_id(event),
4097 .stream_id = event->id,
4098 };
4099
4100 if (enable)
4101 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4102
4103 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
4104 if (ret)
4105 return;
4106
4107 perf_output_put(&handle, throttle_event);
4108 perf_output_end(&handle);
4109}
4110
4111/*
4112 * Generic event overflow handling, sampling.
4113 */
4114
4115static int __perf_event_overflow(struct perf_event *event, int nmi,
4116 int throttle, struct perf_sample_data *data,
4117 struct pt_regs *regs)
4118{
4119 int events = atomic_read(&event->event_limit);
4120 struct hw_perf_event *hwc = &event->hw;
4121 int ret = 0;
4122
4123 throttle = (throttle && event->pmu->unthrottle != NULL);
4124
4125 if (!throttle) {
4126 hwc->interrupts++;
4127 } else {
4128 if (hwc->interrupts != MAX_INTERRUPTS) {
4129 hwc->interrupts++;
4130 if (HZ * hwc->interrupts >
4131 (u64)sysctl_perf_event_sample_rate) {
4132 hwc->interrupts = MAX_INTERRUPTS;
4133 perf_log_throttle(event, 0);
4134 ret = 1;
4135 }
4136 } else {
4137 /*
4138 * Keep re-disabling the event even though we disabled it on the
4139 * previous pass - just in case we raced with a
4140 * sched-in and the event got enabled again:
4141 */
4142 ret = 1;
4143 }
4144 }
4145
4146 if (event->attr.freq) {
4147 u64 now = perf_clock();
Peter Zijlstraabd50712010-01-26 18:50:16 +01004148 s64 delta = now - hwc->freq_time_stamp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004149
Peter Zijlstraabd50712010-01-26 18:50:16 +01004150 hwc->freq_time_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004151
Peter Zijlstraabd50712010-01-26 18:50:16 +01004152 if (delta > 0 && delta < 2*TICK_NSEC)
4153 perf_adjust_period(event, delta, hwc->last_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004154 }
4155
4156 /*
4157 * XXX event_limit might not quite work as expected on inherited
4158 * events
4159 */
4160
4161 event->pending_kill = POLL_IN;
4162 if (events && atomic_dec_and_test(&event->event_limit)) {
4163 ret = 1;
4164 event->pending_kill = POLL_HUP;
4165 if (nmi) {
4166 event->pending_disable = 1;
4167 perf_pending_queue(&event->pending,
4168 perf_pending_event);
4169 } else
4170 perf_event_disable(event);
4171 }
4172
Peter Zijlstra453f19e2009-11-20 22:19:43 +01004173 if (event->overflow_handler)
4174 event->overflow_handler(event, nmi, data, regs);
4175 else
4176 perf_event_output(event, nmi, data, regs);
4177
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004178 return ret;
4179}
4180
4181int perf_event_overflow(struct perf_event *event, int nmi,
4182 struct perf_sample_data *data,
4183 struct pt_regs *regs)
4184{
4185 return __perf_event_overflow(event, nmi, 1, data, regs);
4186}
4187
4188/*
4189 * Generic software event infrastructure
4190 */
4191
4192/*
4193 * We directly increment event->count and keep a second value in
4194 * event->hw.period_left to count intervals. This period value
4195 * is kept in the range [-sample_period, 0] so that we can use the
4196 * sign as the overflow trigger.
4197 */
4198
4199static u64 perf_swevent_set_period(struct perf_event *event)
4200{
4201 struct hw_perf_event *hwc = &event->hw;
4202 u64 period = hwc->last_period;
4203 u64 nr, offset;
4204 s64 old, val;
4205
4206 hwc->last_period = hwc->sample_period;
4207
4208again:
Peter Zijlstrae7850592010-05-21 14:43:08 +02004209 old = val = local64_read(&hwc->period_left);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004210 if (val < 0)
4211 return 0;
4212
4213 nr = div64_u64(period + val, period);
4214 offset = nr * period;
4215 val -= offset;
Peter Zijlstrae7850592010-05-21 14:43:08 +02004216 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004217 goto again;
4218
4219 return nr;
4220}
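
/*
 * Illustrative arithmetic sketch (not part of this file): the same period
 * bookkeeping as perf_swevent_set_period() above, on plain integers.  With
 * period_left normally kept in [-sample_period, 0], a value that has
 * climbed to a non-negative "val" means the event overflowed
 * (period + val) / period times; e.g. period = 100, period_left = 30
 * gives 1 overflow and a new period_left of 30 - 100 = -70.  The
 * example_* name is made up and no concurrent updaters are handled.
 */
#include <stdint.h>

static uint64_t example_set_period(int64_t *period_left, uint64_t period)
{
	int64_t val = *period_left;
	uint64_t nr;

	if (val < 0)
		return 0;	/* no overflow yet */

	nr = (period + (uint64_t)val) / period;
	*period_left = val - (int64_t)(nr * period);

	return nr;
}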
4221
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004222static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004223 int nmi, struct perf_sample_data *data,
4224 struct pt_regs *regs)
4225{
4226 struct hw_perf_event *hwc = &event->hw;
4227 int throttle = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004228
4229 data->period = event->hw.last_period;
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004230 if (!overflow)
4231 overflow = perf_swevent_set_period(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004232
4233 if (hwc->interrupts == MAX_INTERRUPTS)
4234 return;
4235
4236 for (; overflow; overflow--) {
4237 if (__perf_event_overflow(event, nmi, throttle,
4238 data, regs)) {
4239 /*
4240 * We inhibit the overflow from happening when
4241 * hwc->interrupts == MAX_INTERRUPTS.
4242 */
4243 break;
4244 }
4245 throttle = 1;
4246 }
4247}
4248
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004249static void perf_swevent_add(struct perf_event *event, u64 nr,
4250 int nmi, struct perf_sample_data *data,
4251 struct pt_regs *regs)
4252{
4253 struct hw_perf_event *hwc = &event->hw;
4254
Peter Zijlstrae7850592010-05-21 14:43:08 +02004255 local64_add(nr, &event->count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004256
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004257 if (!regs)
4258 return;
4259
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004260 if (!hwc->sample_period)
4261 return;
4262
4263 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4264 return perf_swevent_overflow(event, 1, nmi, data, regs);
4265
Peter Zijlstrae7850592010-05-21 14:43:08 +02004266 if (local64_add_negative(nr, &hwc->period_left))
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004267 return;
4268
4269 perf_swevent_overflow(event, 0, nmi, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004270}
4271
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004272static int perf_exclude_event(struct perf_event *event,
4273 struct pt_regs *regs)
4274{
4275 if (regs) {
4276 if (event->attr.exclude_user && user_mode(regs))
4277 return 1;
4278
4279 if (event->attr.exclude_kernel && !user_mode(regs))
4280 return 1;
4281 }
4282
4283 return 0;
4284}
4285
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004286static int perf_swevent_match(struct perf_event *event,
4287 enum perf_type_id type,
Li Zefan6fb29152009-10-15 11:21:42 +08004288 u32 event_id,
4289 struct perf_sample_data *data,
4290 struct pt_regs *regs)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004291{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004292 if (event->attr.type != type)
4293 return 0;
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004294
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004295 if (event->attr.config != event_id)
4296 return 0;
4297
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004298 if (perf_exclude_event(event, regs))
4299 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004300
4301 return 1;
4302}
4303
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004304static inline u64 swevent_hash(u64 type, u32 event_id)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004305{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004306 u64 val = event_id | (type << 32);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004307
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004308 return hash_64(val, SWEVENT_HLIST_BITS);
4309}
4310
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004311static inline struct hlist_head *
4312__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004313{
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004314 u64 hash = swevent_hash(type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004315
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004316 return &hlist->heads[hash];
4317}
4318
4319/* For the read side: used when events trigger */
4320static inline struct hlist_head *
4321find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
4322{
4323 struct swevent_hlist *hlist;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004324
4325 hlist = rcu_dereference(ctx->swevent_hlist);
4326 if (!hlist)
4327 return NULL;
4328
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004329 return __find_swevent_head(hlist, type, event_id);
4330}
4331
4332/* For the event head insertion and removal in the hlist */
4333static inline struct hlist_head *
4334find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
4335{
4336 struct swevent_hlist *hlist;
4337 u32 event_id = event->attr.config;
4338 u64 type = event->attr.type;
4339
4340 /*
4341 * Event scheduling is always serialized against hlist allocation
4342 * and release, which makes the protected version suitable here.
4343 * The context lock guarantees that.
4344 */
4345 hlist = rcu_dereference_protected(ctx->swevent_hlist,
4346 lockdep_is_held(&event->ctx->lock));
4347 if (!hlist)
4348 return NULL;
4349
4350 return __find_swevent_head(hlist, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004351}
4352
4353static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4354 u64 nr, int nmi,
4355 struct perf_sample_data *data,
4356 struct pt_regs *regs)
4357{
4358 struct perf_cpu_context *cpuctx;
4359 struct perf_event *event;
4360 struct hlist_node *node;
4361 struct hlist_head *head;
4362
4363 cpuctx = &__get_cpu_var(perf_cpu_context);
4364
4365 rcu_read_lock();
4366
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004367 head = find_swevent_head_rcu(cpuctx, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004368
4369 if (!head)
4370 goto end;
4371
4372 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
Li Zefan6fb29152009-10-15 11:21:42 +08004373 if (perf_swevent_match(event, type, event_id, data, regs))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004374 perf_swevent_add(event, nr, nmi, data, regs);
4375 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004376end:
4377 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004378}
4379
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004380int perf_swevent_get_recursion_context(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004381{
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004382 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004383
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02004384 return get_recursion_context(cpuctx->recursion);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004385}
Ingo Molnar645e8cc2009-11-22 12:20:19 +01004386EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004387
Peter Zijlstraecc55f82010-05-21 15:11:34 +02004388inline void perf_swevent_put_recursion_context(int rctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004389{
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004390 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02004391
4392 put_recursion_context(cpuctx->recursion, rctx);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004393}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004394
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004395void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4396 struct pt_regs *regs, u64 addr)
4397{
Ingo Molnara4234bf2009-11-23 10:57:59 +01004398 struct perf_sample_data data;
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004399 int rctx;
4400
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004401 preempt_disable_notrace();
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004402 rctx = perf_swevent_get_recursion_context();
4403 if (rctx < 0)
4404 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004405
Peter Zijlstradc1d6282010-03-03 15:55:04 +01004406 perf_sample_data_init(&data, addr);
Ingo Molnara4234bf2009-11-23 10:57:59 +01004407
4408 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004409
4410 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004411 preempt_enable_notrace();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004412}
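
/*
 * Illustrative call-site sketch (not part of this file): how a fault
 * handler accounts a software event through the __perf_sw_event() path
 * above.  The real page-fault handlers make an equivalent call via the
 * perf_sw_event() wrapper; the surrounding function below is made up for
 * the example.
 */
static void example_account_page_fault(struct pt_regs *regs,
				       unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
}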
4413
4414static void perf_swevent_read(struct perf_event *event)
4415{
4416}
4417
4418static int perf_swevent_enable(struct perf_event *event)
4419{
4420 struct hw_perf_event *hwc = &event->hw;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004421 struct perf_cpu_context *cpuctx;
4422 struct hlist_head *head;
4423
4424 cpuctx = &__get_cpu_var(perf_cpu_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004425
4426 if (hwc->sample_period) {
4427 hwc->last_period = hwc->sample_period;
4428 perf_swevent_set_period(event);
4429 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004430
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004431 head = find_swevent_head(cpuctx, event);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004432 if (WARN_ON_ONCE(!head))
4433 return -EINVAL;
4434
4435 hlist_add_head_rcu(&event->hlist_entry, head);
4436
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004437 return 0;
4438}
4439
4440static void perf_swevent_disable(struct perf_event *event)
4441{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004442 hlist_del_rcu(&event->hlist_entry);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004443}
4444
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004445static void perf_swevent_void(struct perf_event *event)
4446{
4447}
4448
4449static int perf_swevent_int(struct perf_event *event)
4450{
4451 return 0;
4452}
4453
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004454/* Deref the hlist from the update side */
4455static inline struct swevent_hlist *
4456swevent_hlist_deref(struct perf_cpu_context *cpuctx)
4457{
4458 return rcu_dereference_protected(cpuctx->swevent_hlist,
4459 lockdep_is_held(&cpuctx->hlist_mutex));
4460}
4461
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004462static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4463{
4464 struct swevent_hlist *hlist;
4465
4466 hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
4467 kfree(hlist);
4468}
4469
4470static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
4471{
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004472 struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004473
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004474 if (!hlist)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004475 return;
4476
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004477 rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
4478 call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
4479}
4480
4481static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4482{
4483 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4484
4485 mutex_lock(&cpuctx->hlist_mutex);
4486
4487 if (!--cpuctx->hlist_refcount)
4488 swevent_hlist_release(cpuctx);
4489
4490 mutex_unlock(&cpuctx->hlist_mutex);
4491}
4492
4493static void swevent_hlist_put(struct perf_event *event)
4494{
4495 int cpu;
4496
4497 if (event->cpu != -1) {
4498 swevent_hlist_put_cpu(event, event->cpu);
4499 return;
4500 }
4501
4502 for_each_possible_cpu(cpu)
4503 swevent_hlist_put_cpu(event, cpu);
4504}
4505
4506static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4507{
4508 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4509 int err = 0;
4510
4511 mutex_lock(&cpuctx->hlist_mutex);
4512
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004513 if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004514 struct swevent_hlist *hlist;
4515
4516 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4517 if (!hlist) {
4518 err = -ENOMEM;
4519 goto exit;
4520 }
4521 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
4522 }
4523 cpuctx->hlist_refcount++;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02004524exit:
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004525 mutex_unlock(&cpuctx->hlist_mutex);
4526
4527 return err;
4528}
4529
4530static int swevent_hlist_get(struct perf_event *event)
4531{
4532 int err;
4533 int cpu, failed_cpu;
4534
4535 if (event->cpu != -1)
4536 return swevent_hlist_get_cpu(event, event->cpu);
4537
4538 get_online_cpus();
4539 for_each_possible_cpu(cpu) {
4540 err = swevent_hlist_get_cpu(event, cpu);
4541 if (err) {
4542 failed_cpu = cpu;
4543 goto fail;
4544 }
4545 }
4546 put_online_cpus();
4547
4548 return 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02004549fail:
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004550 for_each_possible_cpu(cpu) {
4551 if (cpu == failed_cpu)
4552 break;
4553 swevent_hlist_put_cpu(event, cpu);
4554 }
4555
4556 put_online_cpus();
4557 return err;
4558}
4559
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004560atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004561
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004562static void sw_perf_event_destroy(struct perf_event *event)
4563{
4564 u64 event_id = event->attr.config;
4565
4566 WARN_ON(event->parent);
4567
4568 atomic_dec(&perf_swevent_enabled[event_id]);
4569 swevent_hlist_put(event);
4570}
4571
4572static int perf_swevent_init(struct perf_event *event)
4573{
4574 int event_id = event->attr.config;
4575
4576 if (event->attr.type != PERF_TYPE_SOFTWARE)
4577 return -ENOENT;
4578
4579 switch (event_id) {
4580 case PERF_COUNT_SW_CPU_CLOCK:
4581 case PERF_COUNT_SW_TASK_CLOCK:
4582 return -ENOENT;
4583
4584 default:
4585 break;
4586 }
4587
4588 if (event_id > PERF_COUNT_SW_MAX)
4589 return -ENOENT;
4590
4591 if (!event->parent) {
4592 int err;
4593
4594 err = swevent_hlist_get(event);
4595 if (err)
4596 return err;
4597
4598 atomic_inc(&perf_swevent_enabled[event_id]);
4599 event->destroy = sw_perf_event_destroy;
4600 }
4601
4602 return 0;
4603}
4604
4605static struct pmu perf_swevent = {
4606 .event_init = perf_swevent_init,
4607 .enable = perf_swevent_enable,
4608 .disable = perf_swevent_disable,
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004609 .start = perf_swevent_int,
4610 .stop = perf_swevent_void,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004611 .read = perf_swevent_read,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004612 .unthrottle = perf_swevent_void, /* hwc->interrupts already reset */
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004613};
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004614
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004615#ifdef CONFIG_EVENT_TRACING
4616
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004617static int perf_tp_filter_match(struct perf_event *event,
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004618 struct perf_sample_data *data)
4619{
4620 void *record = data->raw->data;
4621
4622 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4623 return 1;
4624 return 0;
4625}
4626
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004627static int perf_tp_event_match(struct perf_event *event,
4628 struct perf_sample_data *data,
4629 struct pt_regs *regs)
4630{
Peter Zijlstra580d6072010-05-20 20:54:31 +02004631 /*
4632 * All tracepoints are from kernel-space.
4633 */
4634 if (event->attr.exclude_kernel)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004635 return 0;
4636
4637 if (!perf_tp_filter_match(event, data))
4638 return 0;
4639
4640 return 1;
4641}
4642
4643void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
Peter Zijlstraecc55f82010-05-21 15:11:34 +02004644 struct pt_regs *regs, struct hlist_head *head, int rctx)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004645{
4646 struct perf_sample_data data;
4647 struct perf_event *event;
4648 struct hlist_node *node;
4649
4650 struct perf_raw_record raw = {
4651 .size = entry_size,
4652 .data = record,
4653 };
4654
4655 perf_sample_data_init(&data, addr);
4656 data.raw = &raw;
4657
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004658 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4659 if (perf_tp_event_match(event, &data, regs))
4660 perf_swevent_add(event, count, 1, &data, regs);
4661 }
Peter Zijlstraecc55f82010-05-21 15:11:34 +02004662
4663 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004664}
4665EXPORT_SYMBOL_GPL(perf_tp_event);
4666
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004667static void tp_perf_event_destroy(struct perf_event *event)
4668{
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004669 perf_trace_destroy(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004670}
4671
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004672static int perf_tp_event_init(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004673{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004674 int err;
4675
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004676 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4677 return -ENOENT;
4678
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004679 /*
4680 * Raw tracepoint data is a severe data leak, only allow root to
4681 * have these.
4682 */
4683 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4684 perf_paranoid_tracepoint_raw() &&
4685 !capable(CAP_SYS_ADMIN))
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004686 return -EPERM;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004687
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004688 err = perf_trace_init(event);
4689 if (err)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004690 return err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004691
4692 event->destroy = tp_perf_event_destroy;
4693
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004694 return 0;
4695}
4696
4697static struct pmu perf_tracepoint = {
4698 .event_init = perf_tp_event_init,
4699 .enable = perf_trace_enable,
4700 .disable = perf_trace_disable,
4701 .start = perf_swevent_int,
4702 .stop = perf_swevent_void,
4703 .read = perf_swevent_read,
4704 .unthrottle = perf_swevent_void,
4705};
4706
4707static inline void perf_tp_register(void)
4708{
4709 perf_pmu_register(&perf_tracepoint);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004710}
Li Zefan6fb29152009-10-15 11:21:42 +08004711
4712static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4713{
4714 char *filter_str;
4715 int ret;
4716
4717 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4718 return -EINVAL;
4719
4720 filter_str = strndup_user(arg, PAGE_SIZE);
4721 if (IS_ERR(filter_str))
4722 return PTR_ERR(filter_str);
4723
4724 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4725
4726 kfree(filter_str);
4727 return ret;
4728}
4729
4730static void perf_event_free_filter(struct perf_event *event)
4731{
4732 ftrace_profile_free_filter(event);
4733}
4734
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004735#else
Li Zefan6fb29152009-10-15 11:21:42 +08004736
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004737static inline void perf_tp_register(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004738{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004739}
Li Zefan6fb29152009-10-15 11:21:42 +08004740
4741static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4742{
4743 return -ENOENT;
4744}
4745
4746static void perf_event_free_filter(struct perf_event *event)
4747{
4748}
4749
Li Zefan07b139c2009-12-21 14:27:35 +08004750#endif /* CONFIG_EVENT_TRACING */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004751
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004752#ifdef CONFIG_HAVE_HW_BREAKPOINT
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004753void perf_bp_event(struct perf_event *bp, void *data)
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004754{
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004755 struct perf_sample_data sample;
4756 struct pt_regs *regs = data;
4757
Peter Zijlstradc1d6282010-03-03 15:55:04 +01004758 perf_sample_data_init(&sample, bp->attr.bp_addr);
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004759
4760 if (!perf_exclude_event(bp, regs))
4761 perf_swevent_add(bp, 1, 1, &sample, regs);
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004762}
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004763#endif
4764
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004765/*
4766 * hrtimer based swevent callback
4767 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004768
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004769static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004770{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004771 enum hrtimer_restart ret = HRTIMER_RESTART;
4772 struct perf_sample_data data;
4773 struct pt_regs *regs;
4774 struct perf_event *event;
4775 u64 period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004776
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004777 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4778 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004779
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004780 perf_sample_data_init(&data, 0);
4781 data.period = event->hw.last_period;
4782 regs = get_irq_regs();
4783
4784 if (regs && !perf_exclude_event(event, regs)) {
4785 if (!(event->attr.exclude_idle && current->pid == 0))
4786 if (perf_event_overflow(event, 0, &data, regs))
4787 ret = HRTIMER_NORESTART;
4788 }
4789
4790 period = max_t(u64, 10000, event->hw.sample_period);
4791 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4792
4793 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004794}
4795
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004796static void perf_swevent_start_hrtimer(struct perf_event *event)
4797{
4798 struct hw_perf_event *hwc = &event->hw;
4799
4800 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4801 hwc->hrtimer.function = perf_swevent_hrtimer;
4802 if (hwc->sample_period) {
Peter Zijlstrafa407f32010-06-24 12:35:12 +02004803 s64 period = local64_read(&hwc->period_left);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004804
Peter Zijlstrafa407f32010-06-24 12:35:12 +02004805 if (period) {
4806 if (period < 0)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004807 period = 10000;
Peter Zijlstrafa407f32010-06-24 12:35:12 +02004808
4809 local64_set(&hwc->period_left, 0);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004810 } else {
4811 period = max_t(u64, 10000, hwc->sample_period);
4812 }
4813 __hrtimer_start_range_ns(&hwc->hrtimer,
4814 ns_to_ktime(period), 0,
4815 HRTIMER_MODE_REL, 0);
4816 }
4817}
4818
4819static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4820{
4821 struct hw_perf_event *hwc = &event->hw;
4822
4823 if (hwc->sample_period) {
4824 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
Peter Zijlstrafa407f32010-06-24 12:35:12 +02004825 local64_set(&hwc->period_left, ktime_to_ns(remaining));
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004826
4827 hrtimer_cancel(&hwc->hrtimer);
4828 }
4829}
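/*
 * Editor's illustrative sketch -- not part of this file.  A sampling
 * software event driven by the hrtimer above; the period is in event
 * units (nanoseconds for the clock events below) and is clamped to a
 * minimum of 10us by perf_swevent_hrtimer().
 */
#if 0
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_SW_CPU_CLOCK,
		.sample_period	= 1000000,	/* ~one sample per ms of cpu time */
	};
#endif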
4830
4831/*
4832 * Software event: cpu wall time clock
4833 */
4834
4835static void cpu_clock_event_update(struct perf_event *event)
4836{
4837 int cpu = raw_smp_processor_id();
4838 s64 prev;
4839 u64 now;
4840
4841 now = cpu_clock(cpu);
4842 prev = local64_xchg(&event->hw.prev_count, now);
4843 local64_add(now - prev, &event->count);
4844}
4845
4846static int cpu_clock_event_enable(struct perf_event *event)
4847{
4848 struct hw_perf_event *hwc = &event->hw;
4849 int cpu = raw_smp_processor_id();
4850
4851 local64_set(&hwc->prev_count, cpu_clock(cpu));
4852 perf_swevent_start_hrtimer(event);
4853
4854 return 0;
4855}
4856
4857static void cpu_clock_event_disable(struct perf_event *event)
4858{
4859 perf_swevent_cancel_hrtimer(event);
4860 cpu_clock_event_update(event);
4861}
4862
4863static void cpu_clock_event_read(struct perf_event *event)
4864{
4865 cpu_clock_event_update(event);
4866}
4867
4868static int cpu_clock_event_init(struct perf_event *event)
4869{
4870 if (event->attr.type != PERF_TYPE_SOFTWARE)
4871 return -ENOENT;
4872
4873 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
4874 return -ENOENT;
4875
4876 return 0;
4877}
4878
4879static struct pmu perf_cpu_clock = {
4880 .event_init = cpu_clock_event_init,
4881 .enable = cpu_clock_event_enable,
4882 .disable = cpu_clock_event_disable,
4883 .read = cpu_clock_event_read,
4884};
4885
4886/*
4887 * Software event: task time clock
4888 */
4889
4890static void task_clock_event_update(struct perf_event *event, u64 now)
4891{
4892 u64 prev;
4893 s64 delta;
4894
4895 prev = local64_xchg(&event->hw.prev_count, now);
4896 delta = now - prev;
4897 local64_add(delta, &event->count);
4898}
4899
4900static int task_clock_event_enable(struct perf_event *event)
4901{
4902 struct hw_perf_event *hwc = &event->hw;
4903 u64 now;
4904
4905 now = event->ctx->time;
4906
4907 local64_set(&hwc->prev_count, now);
4908
4909 perf_swevent_start_hrtimer(event);
4910
4911 return 0;
4912}
4913
4914static void task_clock_event_disable(struct perf_event *event)
4915{
4916 perf_swevent_cancel_hrtimer(event);
4917 task_clock_event_update(event, event->ctx->time);
4919}
4920
4921static void task_clock_event_read(struct perf_event *event)
4922{
4923 u64 time;
4924
4925 if (!in_nmi()) {
4926 update_context_time(event->ctx);
4927 time = event->ctx->time;
4928 } else {
4929 u64 now = perf_clock();
4930 u64 delta = now - event->ctx->timestamp;
4931 time = event->ctx->time + delta;
4932 }
4933
4934 task_clock_event_update(event, time);
4935}
4936
4937static int task_clock_event_init(struct perf_event *event)
4938{
4939 if (event->attr.type != PERF_TYPE_SOFTWARE)
4940 return -ENOENT;
4941
4942 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
4943 return -ENOENT;
4944
4945 return 0;
4946}
4947
4948static struct pmu perf_task_clock = {
4949 .event_init = task_clock_event_init,
4950 .enable = task_clock_event_enable,
4951 .disable = task_clock_event_disable,
4952 .read = task_clock_event_read,
4953};
4954
4955static LIST_HEAD(pmus);
4956static DEFINE_MUTEX(pmus_lock);
4957static struct srcu_struct pmus_srcu;
4958
Peter Zijlstraad5133b2010-06-15 12:22:39 +02004959static void perf_pmu_nop_void(struct pmu *pmu)
4960{
4961}
4962
4963static int perf_pmu_nop_int(struct pmu *pmu)
4964{
4965 return 0;
4966}
4967
4968static void perf_pmu_start_txn(struct pmu *pmu)
4969{
4970 perf_pmu_disable(pmu);
4971}
4972
4973static int perf_pmu_commit_txn(struct pmu *pmu)
4974{
4975 perf_pmu_enable(pmu);
4976 return 0;
4977}
4978
4979static void perf_pmu_cancel_txn(struct pmu *pmu)
4980{
4981 perf_pmu_enable(pmu);
4982}
4983
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004984int perf_pmu_register(struct pmu *pmu)
4985{
Peter Zijlstra33696fc2010-06-14 08:49:00 +02004986 int ret;
4987
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02004988 mutex_lock(&pmus_lock);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02004989 ret = -ENOMEM;
4990 pmu->pmu_disable_count = alloc_percpu(int);
4991 if (!pmu->pmu_disable_count)
4992 goto unlock;
Peter Zijlstraad5133b2010-06-15 12:22:39 +02004993
4994 if (!pmu->start_txn) {
4995 if (pmu->pmu_enable) {
4996 /*
4997 * If we have pmu_enable/pmu_disable calls, install
4998 * transaction stubs that use them to try to batch
4999 * hardware accesses.
5000 */
5001 pmu->start_txn = perf_pmu_start_txn;
5002 pmu->commit_txn = perf_pmu_commit_txn;
5003 pmu->cancel_txn = perf_pmu_cancel_txn;
5004 } else {
5005 pmu->start_txn = perf_pmu_nop_void;
5006 pmu->commit_txn = perf_pmu_nop_int;
5007 pmu->cancel_txn = perf_pmu_nop_void;
5008 }
5009 }
5010
5011 if (!pmu->pmu_enable) {
5012 pmu->pmu_enable = perf_pmu_nop_void;
5013 pmu->pmu_disable = perf_pmu_nop_void;
5014 }
5015
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005016 list_add_rcu(&pmu->entry, &pmus);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005017 ret = 0;
5018unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005019 mutex_unlock(&pmus_lock);
5020
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005021 return ret;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005022}
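/*
 * Editor's illustrative sketch -- not part of this file.  How the stub
 * transaction callbacks installed above are meant to be driven when a
 * whole event group is scheduled; add_whole_group() is a hypothetical
 * stand-in for the group scheduling code elsewhere in this file.
 */
#if 0
	pmu->start_txn(pmu);		/* perf_pmu_disable() for the stubs */

	if (!add_whole_group(pmu) && !pmu->commit_txn(pmu))
		return 0;		/* every event made it onto the hardware */

	pmu->cancel_txn(pmu);		/* perf_pmu_enable(): roll back */
#endif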
5023
5024void perf_pmu_unregister(struct pmu *pmu)
5025{
5026 mutex_lock(&pmus_lock);
5027 list_del_rcu(&pmu->entry);
5028 mutex_unlock(&pmus_lock);
5029
5030 synchronize_srcu(&pmus_srcu);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005031
5032 free_percpu(pmu->pmu_disable_count);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005033}
5034
5035struct pmu *perf_init_event(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005036{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02005037 struct pmu *pmu = NULL;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005038 int idx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005039
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005040 idx = srcu_read_lock(&pmus_srcu);
5041 list_for_each_entry_rcu(pmu, &pmus, entry) {
5042 int ret = pmu->event_init(event);
5043 if (!ret)
5044 break;
5045 if (ret != -ENOENT) {
5046 pmu = ERR_PTR(ret);
5047 break;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005048 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005049 }
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005050 srcu_read_unlock(&pmus_srcu, idx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005051
5052 return pmu;
5053}
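/*
 * Editor's illustrative sketch -- not part of this file.  A minimal pmu
 * that follows the -ENOENT protocol used by perf_init_event() above;
 * every my_* name is hypothetical.
 */
#if 0
static int my_pmu_event_init(struct perf_event *event)
{
	/* Decline events meant for other pmus so the search continues. */
	if (event->attr.type != PERF_TYPE_RAW)
		return -ENOENT;
	return 0;
}

static int my_pmu_enable(struct perf_event *event)	{ return 0; }
static void my_pmu_disable(struct perf_event *event)	{ }
static void my_pmu_read(struct perf_event *event)	{ }

static struct pmu my_pmu = {
	.event_init	= my_pmu_event_init,
	.enable		= my_pmu_enable,
	.disable	= my_pmu_disable,
	.read		= my_pmu_read,
};

static int __init my_pmu_init(void)
{
	return perf_pmu_register(&my_pmu);
}
#endif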
5054
5055/*
5056 * Allocate and initialize an event structure
5057 */
5058static struct perf_event *
5059perf_event_alloc(struct perf_event_attr *attr,
5060 int cpu,
5061 struct perf_event_context *ctx,
5062 struct perf_event *group_leader,
5063 struct perf_event *parent_event,
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005064 perf_overflow_handler_t overflow_handler,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005065 gfp_t gfpflags)
5066{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02005067 struct pmu *pmu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005068 struct perf_event *event;
5069 struct hw_perf_event *hwc;
5070 long err;
5071
5072 event = kzalloc(sizeof(*event), gfpflags);
5073 if (!event)
5074 return ERR_PTR(-ENOMEM);
5075
5076 /*
5077 * Single events are their own group leaders, with an
5078 * empty sibling list:
5079 */
5080 if (!group_leader)
5081 group_leader = event;
5082
5083 mutex_init(&event->child_mutex);
5084 INIT_LIST_HEAD(&event->child_list);
5085
5086 INIT_LIST_HEAD(&event->group_entry);
5087 INIT_LIST_HEAD(&event->event_entry);
5088 INIT_LIST_HEAD(&event->sibling_list);
5089 init_waitqueue_head(&event->waitq);
5090
5091 mutex_init(&event->mmap_mutex);
5092
5093 event->cpu = cpu;
5094 event->attr = *attr;
5095 event->group_leader = group_leader;
5096 event->pmu = NULL;
5097 event->ctx = ctx;
5098 event->oncpu = -1;
5099
5100 event->parent = parent_event;
5101
5102 event->ns = get_pid_ns(current->nsproxy->pid_ns);
5103 event->id = atomic64_inc_return(&perf_event_id);
5104
5105 event->state = PERF_EVENT_STATE_INACTIVE;
5106
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005107 if (!overflow_handler && parent_event)
5108 overflow_handler = parent_event->overflow_handler;
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02005109
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005110 event->overflow_handler = overflow_handler;
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02005111
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005112 if (attr->disabled)
5113 event->state = PERF_EVENT_STATE_OFF;
5114
5115 pmu = NULL;
5116
5117 hwc = &event->hw;
5118 hwc->sample_period = attr->sample_period;
5119 if (attr->freq && attr->sample_freq)
5120 hwc->sample_period = 1;
5121 hwc->last_period = hwc->sample_period;
5122
Peter Zijlstrae7850592010-05-21 14:43:08 +02005123 local64_set(&hwc->period_left, hwc->sample_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005124
5125 /*
5126 * we currently do not support PERF_FORMAT_GROUP on inherited events
5127 */
5128 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5129 goto done;
5130
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005131 pmu = perf_init_event(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005132
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005133done:
5134 err = 0;
5135 if (!pmu)
5136 err = -EINVAL;
5137 else if (IS_ERR(pmu))
5138 err = PTR_ERR(pmu);
5139
5140 if (err) {
5141 if (event->ns)
5142 put_pid_ns(event->ns);
5143 kfree(event);
5144 return ERR_PTR(err);
5145 }
5146
5147 event->pmu = pmu;
5148
5149 if (!event->parent) {
5150 atomic_inc(&nr_events);
Eric B Munson3af9e852010-05-18 15:30:49 +01005151 if (event->attr.mmap || event->attr.mmap_data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005152 atomic_inc(&nr_mmap_events);
5153 if (event->attr.comm)
5154 atomic_inc(&nr_comm_events);
5155 if (event->attr.task)
5156 atomic_inc(&nr_task_events);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02005157 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5158 err = get_callchain_buffers();
5159 if (err) {
5160 free_event(event);
5161 return ERR_PTR(err);
5162 }
5163 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005164 }
5165
5166 return event;
5167}
5168
5169static int perf_copy_attr(struct perf_event_attr __user *uattr,
5170 struct perf_event_attr *attr)
5171{
5172 u32 size;
5173 int ret;
5174
5175 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5176 return -EFAULT;
5177
5178 /*
5179 * Zero the full structure, so that a short copy leaves the remaining fields zeroed.
5180 */
5181 memset(attr, 0, sizeof(*attr));
5182
5183 ret = get_user(size, &uattr->size);
5184 if (ret)
5185 return ret;
5186
5187 if (size > PAGE_SIZE) /* silly large */
5188 goto err_size;
5189
5190 if (!size) /* abi compat */
5191 size = PERF_ATTR_SIZE_VER0;
5192
5193 if (size < PERF_ATTR_SIZE_VER0)
5194 goto err_size;
5195
5196 /*
5197 * If we're handed a bigger struct than we know of,
5198 * ensure all the unknown bits are 0 - i.e. new
5199 * user-space does not rely on any kernel feature
5200 * extensions we don't know about yet.
5201 */
5202 if (size > sizeof(*attr)) {
5203 unsigned char __user *addr;
5204 unsigned char __user *end;
5205 unsigned char val;
5206
5207 addr = (void __user *)uattr + sizeof(*attr);
5208 end = (void __user *)uattr + size;
5209
5210 for (; addr < end; addr++) {
5211 ret = get_user(val, addr);
5212 if (ret)
5213 return ret;
5214 if (val)
5215 goto err_size;
5216 }
5217 size = sizeof(*attr);
5218 }
5219
5220 ret = copy_from_user(attr, uattr, size);
5221 if (ret)
5222 return -EFAULT;
5223
5224 /*
5225 * If the type exists, the corresponding creation will verify
5226 * the attr->config.
5227 */
5228 if (attr->type >= PERF_TYPE_MAX)
5229 return -EINVAL;
5230
Mahesh Salgaonkarcd757642010-01-30 10:25:18 +05305231 if (attr->__reserved_1)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005232 return -EINVAL;
5233
5234 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5235 return -EINVAL;
5236
5237 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5238 return -EINVAL;
5239
5240out:
5241 return ret;
5242
5243err_size:
5244 put_user(sizeof(*attr), &uattr->size);
5245 ret = -E2BIG;
5246 goto out;
5247}
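/*
 * Editor's illustrative sketch -- not part of this file.  The user-space
 * side of the size handshake checked above: zeroing the structure and
 * setting attr.size lets an old binary run on a newer kernel, while a
 * newer, larger attr is accepted by an older kernel as long as the
 * extra tail bytes are zero (otherwise it gets E2BIG).
 */
#if 0
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
#endif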
5248
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005249static int
5250perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005251{
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005252 struct perf_buffer *buffer = NULL, *old_buffer = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005253 int ret = -EINVAL;
5254
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005255 if (!output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005256 goto set;
5257
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005258 /* don't allow circular references */
5259 if (event == output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005260 goto out;
5261
Peter Zijlstra0f139302010-05-20 14:35:15 +02005262 /*
5263 * Don't allow cross-cpu buffers
5264 */
5265 if (output_event->cpu != event->cpu)
5266 goto out;
5267
5268 /*
5269 * If it's not a per-cpu buffer, it must be the same task.
5270 */
5271 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5272 goto out;
5273
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005274set:
5275 mutex_lock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005276 /* Can't redirect output if we've got an active mmap() */
5277 if (atomic_read(&event->mmap_count))
5278 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005279
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005280 if (output_event) {
5281 /* get the buffer we want to redirect to */
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005282 buffer = perf_buffer_get(output_event);
5283 if (!buffer)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005284 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005285 }
5286
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005287 old_buffer = event->buffer;
5288 rcu_assign_pointer(event->buffer, buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005289 ret = 0;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005290unlock:
5291 mutex_unlock(&event->mmap_mutex);
5292
Peter Zijlstraca5135e2010-05-28 19:33:23 +02005293 if (old_buffer)
5294 perf_buffer_put(old_buffer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005295out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005296 return ret;
5297}
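/*
 * Editor's illustrative sketch -- not part of this file.  User space
 * reaches perf_event_set_output() either by passing PERF_FLAG_FD_OUTPUT
 * plus a group_fd to perf_event_open(), or with the ioctl below; 'fd'
 * and 'target_fd' are hypothetical descriptors, and the redirection is
 * subject to the same-cpu / same-task checks above.
 */
#if 0
	ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, target_fd);
#endif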
5298
5299/**
5300 * sys_perf_event_open - open a performance event, associate it to a task/cpu
5301 *
5302 * @attr_uptr: event_id type attributes for monitoring/sampling
5303 * @pid: target pid
5304 * @cpu: target cpu
5305 * @group_fd: group leader event fd
5306 */
5307SYSCALL_DEFINE5(perf_event_open,
5308 struct perf_event_attr __user *, attr_uptr,
5309 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
5310{
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005311 struct perf_event *event, *group_leader = NULL, *output_event = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005312 struct perf_event_attr attr;
5313 struct perf_event_context *ctx;
5314 struct file *event_file = NULL;
5315 struct file *group_file = NULL;
Al Viroea635c62010-05-26 17:40:29 -04005316 int event_fd;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005317 int fput_needed = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005318 int err;
5319
5320 /* for future expandability... */
5321 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
5322 return -EINVAL;
5323
5324 err = perf_copy_attr(attr_uptr, &attr);
5325 if (err)
5326 return err;
5327
5328 if (!attr.exclude_kernel) {
5329 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
5330 return -EACCES;
5331 }
5332
5333 if (attr.freq) {
5334 if (attr.sample_freq > sysctl_perf_event_sample_rate)
5335 return -EINVAL;
5336 }
5337
Al Viroea635c62010-05-26 17:40:29 -04005338 event_fd = get_unused_fd_flags(O_RDWR);
5339 if (event_fd < 0)
5340 return event_fd;
5341
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005342 /*
5343 * Get the target context (task or percpu):
5344 */
5345 ctx = find_get_context(pid, cpu);
Al Viroea635c62010-05-26 17:40:29 -04005346 if (IS_ERR(ctx)) {
5347 err = PTR_ERR(ctx);
5348 goto err_fd;
5349 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005350
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005351 if (group_fd != -1) {
5352 group_leader = perf_fget_light(group_fd, &fput_needed);
5353 if (IS_ERR(group_leader)) {
5354 err = PTR_ERR(group_leader);
5355 goto err_put_context;
5356 }
5357 group_file = group_leader->filp;
5358 if (flags & PERF_FLAG_FD_OUTPUT)
5359 output_event = group_leader;
5360 if (flags & PERF_FLAG_FD_NO_GROUP)
5361 group_leader = NULL;
5362 }
5363
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005364 /*
5365 * Look up the group leader (we will attach this event to it):
5366 */
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005367 if (group_leader) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005368 err = -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005369
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005370 /*
5371 * Do not allow a recursive hierarchy (this new sibling
5372 * becoming part of another group-sibling):
5373 */
5374 if (group_leader->group_leader != group_leader)
5375 goto err_put_context;
5376 /*
5377 * Do not allow attaching to a group in a different
5378 * task or CPU context:
5379 */
5380 if (group_leader->ctx != ctx)
5381 goto err_put_context;
5382 /*
5383 * Only a group leader can be exclusive or pinned
5384 */
5385 if (attr.exclusive || attr.pinned)
5386 goto err_put_context;
5387 }
5388
5389 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02005390 NULL, NULL, GFP_KERNEL);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005391 if (IS_ERR(event)) {
5392 err = PTR_ERR(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005393 goto err_put_context;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005394 }
5395
5396 if (output_event) {
5397 err = perf_event_set_output(event, output_event);
5398 if (err)
5399 goto err_free_put_context;
5400 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005401
Al Viroea635c62010-05-26 17:40:29 -04005402 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
5403 if (IS_ERR(event_file)) {
5404 err = PTR_ERR(event_file);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005405 goto err_free_put_context;
Al Viroea635c62010-05-26 17:40:29 -04005406 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005407
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005408 event->filp = event_file;
5409 WARN_ON_ONCE(ctx->parent_ctx);
5410 mutex_lock(&ctx->mutex);
5411 perf_install_in_context(ctx, event, cpu);
5412 ++ctx->generation;
5413 mutex_unlock(&ctx->mutex);
5414
5415 event->owner = current;
5416 get_task_struct(current);
5417 mutex_lock(&current->perf_event_mutex);
5418 list_add_tail(&event->owner_entry, &current->perf_event_list);
5419 mutex_unlock(&current->perf_event_mutex);
5420
Peter Zijlstra8a495422010-05-27 15:47:49 +02005421 /*
5422 * Drop the reference on the group_event after placing the
5423 * new event on the sibling_list. This ensures destruction
5424 * of the group leader will find the pointer to itself in
5425 * perf_group_detach().
5426 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005427 fput_light(group_file, fput_needed);
Al Viroea635c62010-05-26 17:40:29 -04005428 fd_install(event_fd, event_file);
5429 return event_fd;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005430
Al Viroea635c62010-05-26 17:40:29 -04005431err_free_put_context:
5432 free_event(event);
5433err_put_context:
5434 fput_light(group_file, fput_needed);
5435 put_ctx(ctx);
5436err_fd:
5437 put_unused_fd(event_fd);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005438 return err;
5439}
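/*
 * Editor's illustrative sketch -- not part of this file.  Minimal
 * user-space use of the syscall above: there is no glibc wrapper, so
 * syscall(2) is called directly, and counting the caller's task-clock
 * avoids any dependency on PMU hardware.
 */
#if 0
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	/* pid 0 = current task, cpu -1 = any cpu, no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run the workload to be measured ... */

	read(fd, &count, sizeof(count));
	printf("task-clock: %llu ns\n", (unsigned long long)count);
	close(fd);
	return 0;
}
#endif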
5440
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005441/**
5442 * perf_event_create_kernel_counter
5443 *
5444 * @attr: attributes of the counter to create
5445 * @cpu: cpu on which the counter is bound
5446 * @pid: task to profile
5447 */
5448struct perf_event *
5449perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005450 pid_t pid,
5451 perf_overflow_handler_t overflow_handler)
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005452{
5453 struct perf_event *event;
5454 struct perf_event_context *ctx;
5455 int err;
5456
5457 /*
5458 * Get the target context (task or percpu):
5459 */
5460
5461 ctx = find_get_context(pid, cpu);
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005462 if (IS_ERR(ctx)) {
5463 err = PTR_ERR(ctx);
5464 goto err_exit;
5465 }
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005466
5467 event = perf_event_alloc(attr, cpu, ctx, NULL,
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005468 NULL, overflow_handler, GFP_KERNEL);
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005469 if (IS_ERR(event)) {
5470 err = PTR_ERR(event);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005471 goto err_put_context;
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005472 }
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005473
5474 event->filp = NULL;
5475 WARN_ON_ONCE(ctx->parent_ctx);
5476 mutex_lock(&ctx->mutex);
5477 perf_install_in_context(ctx, event, cpu);
5478 ++ctx->generation;
5479 mutex_unlock(&ctx->mutex);
5480
5481 event->owner = current;
5482 get_task_struct(current);
5483 mutex_lock(&current->perf_event_mutex);
5484 list_add_tail(&event->owner_entry, &current->perf_event_list);
5485 mutex_unlock(&current->perf_event_mutex);
5486
5487 return event;
5488
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005489 err_put_context:
5490 put_ctx(ctx);
5491 err_exit:
5492 return ERR_PTR(err);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005493}
5494EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
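/*
 * Editor's illustrative sketch -- not part of this file.  Creating an
 * in-kernel counter with the export above; a pid of -1 together with a
 * valid cpu is the cpu-bound case, and passing an overflow handler plus
 * a sample_period would make it a sampling event instead.
 */
#if 0
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.size	= sizeof(attr),
		.config	= PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_event *event;
	int cpu = 0;	/* hypothetical target cpu */

	event = perf_event_create_kernel_counter(&attr, cpu, -1, NULL);
	if (IS_ERR(event))
		return PTR_ERR(event);
#endif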
5495
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005496/*
5497 * inherit an event from parent task to child task:
5498 */
5499static struct perf_event *
5500inherit_event(struct perf_event *parent_event,
5501 struct task_struct *parent,
5502 struct perf_event_context *parent_ctx,
5503 struct task_struct *child,
5504 struct perf_event *group_leader,
5505 struct perf_event_context *child_ctx)
5506{
5507 struct perf_event *child_event;
5508
5509 /*
5510 * Instead of creating recursive hierarchies of events,
5511 * we link inherited events back to the original parent,
5512 * which is guaranteed to have a filp that we use as the
5513 * reference count:
5514 */
5515 if (parent_event->parent)
5516 parent_event = parent_event->parent;
5517
5518 child_event = perf_event_alloc(&parent_event->attr,
5519 parent_event->cpu, child_ctx,
5520 group_leader, parent_event,
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02005521 NULL, GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005522 if (IS_ERR(child_event))
5523 return child_event;
5524 get_ctx(child_ctx);
5525
5526 /*
5527 * Make the child state follow the state of the parent event,
5528 * not its attr.disabled bit. We hold the parent's mutex,
5529 * so we won't race with perf_event_{en, dis}able_family.
5530 */
5531 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
5532 child_event->state = PERF_EVENT_STATE_INACTIVE;
5533 else
5534 child_event->state = PERF_EVENT_STATE_OFF;
5535
Peter Zijlstra75c9f322010-01-29 09:04:26 +01005536 if (parent_event->attr.freq) {
5537 u64 sample_period = parent_event->hw.sample_period;
5538 struct hw_perf_event *hwc = &child_event->hw;
5539
5540 hwc->sample_period = sample_period;
5541 hwc->last_period = sample_period;
5542
Peter Zijlstrae7850592010-05-21 14:43:08 +02005543 local64_set(&hwc->period_left, sample_period);
Peter Zijlstra75c9f322010-01-29 09:04:26 +01005544 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005545
Peter Zijlstra453f19e2009-11-20 22:19:43 +01005546 child_event->overflow_handler = parent_event->overflow_handler;
5547
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005548 /*
5549 * Link it up in the child's context:
5550 */
5551 add_event_to_ctx(child_event, child_ctx);
5552
5553 /*
5554 * Get a reference to the parent filp - we will fput it
5555 * when the child event exits. This is safe to do because
5556 * we are in the parent and we know that the filp still
5557 * exists and has a nonzero count:
5558 */
5559 atomic_long_inc(&parent_event->filp->f_count);
5560
5561 /*
5562 * Link this into the parent event's child list
5563 */
5564 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5565 mutex_lock(&parent_event->child_mutex);
5566 list_add_tail(&child_event->child_list, &parent_event->child_list);
5567 mutex_unlock(&parent_event->child_mutex);
5568
5569 return child_event;
5570}
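/*
 * Editor's illustrative sketch -- not part of this file.  The user-space
 * knob that makes the inheritance path above (and the count fold-back in
 * sync_child_event() below) take effect across fork().
 */
#if 0
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_SW_TASK_CLOCK,
		.inherit	= 1,	/* children are counted as well */
	};
#endif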
5571
5572static int inherit_group(struct perf_event *parent_event,
5573 struct task_struct *parent,
5574 struct perf_event_context *parent_ctx,
5575 struct task_struct *child,
5576 struct perf_event_context *child_ctx)
5577{
5578 struct perf_event *leader;
5579 struct perf_event *sub;
5580 struct perf_event *child_ctr;
5581
5582 leader = inherit_event(parent_event, parent, parent_ctx,
5583 child, NULL, child_ctx);
5584 if (IS_ERR(leader))
5585 return PTR_ERR(leader);
5586 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
5587 child_ctr = inherit_event(sub, parent, parent_ctx,
5588 child, leader, child_ctx);
5589 if (IS_ERR(child_ctr))
5590 return PTR_ERR(child_ctr);
5591 }
5592 return 0;
5593}
5594
5595static void sync_child_event(struct perf_event *child_event,
5596 struct task_struct *child)
5597{
5598 struct perf_event *parent_event = child_event->parent;
5599 u64 child_val;
5600
5601 if (child_event->attr.inherit_stat)
5602 perf_event_read_event(child_event, child);
5603
Peter Zijlstrab5e58792010-05-21 14:43:12 +02005604 child_val = perf_event_count(child_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005605
5606 /*
5607 * Add back the child's count to the parent's count:
5608 */
Peter Zijlstraa6e6dea2010-05-21 14:27:58 +02005609 atomic64_add(child_val, &parent_event->child_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005610 atomic64_add(child_event->total_time_enabled,
5611 &parent_event->child_total_time_enabled);
5612 atomic64_add(child_event->total_time_running,
5613 &parent_event->child_total_time_running);
5614
5615 /*
5616 * Remove this event from the parent's list
5617 */
5618 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5619 mutex_lock(&parent_event->child_mutex);
5620 list_del_init(&child_event->child_list);
5621 mutex_unlock(&parent_event->child_mutex);
5622
5623 /*
5624 * Release the parent event, if this was the last
5625 * reference to it.
5626 */
5627 fput(parent_event->filp);
5628}
5629
5630static void
5631__perf_event_exit_task(struct perf_event *child_event,
5632 struct perf_event_context *child_ctx,
5633 struct task_struct *child)
5634{
5635 struct perf_event *parent_event;
5636
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005637 perf_event_remove_from_context(child_event);
5638
5639 parent_event = child_event->parent;
5640 /*
5641 * It can happen that the parent exits first, and has events
5642 * that are still around due to the child reference. These
5643 * events need to be zapped; otherwise they would linger.
5644 */
5645 if (parent_event) {
5646 sync_child_event(child_event, child);
5647 free_event(child_event);
5648 }
5649}
5650
5651/*
5652 * When a child task exits, feed back event values to parent events.
5653 */
5654void perf_event_exit_task(struct task_struct *child)
5655{
5656 struct perf_event *child_event, *tmp;
5657 struct perf_event_context *child_ctx;
5658 unsigned long flags;
5659
5660 if (likely(!child->perf_event_ctxp)) {
5661 perf_event_task(child, NULL, 0);
5662 return;
5663 }
5664
5665 local_irq_save(flags);
5666 /*
5667 * We can't reschedule here because interrupts are disabled,
5668 * and either child is current or it is a task that can't be
5669 * scheduled, so we are now safe from rescheduling changing
5670 * our context.
5671 */
5672 child_ctx = child->perf_event_ctxp;
5673 __perf_event_task_sched_out(child_ctx);
5674
5675 /*
5676 * Take the context lock here so that if find_get_context is
5677 * reading child->perf_event_ctxp, we wait until it has
5678 * incremented the context's refcount before we do put_ctx below.
5679 */
Thomas Gleixnere625cce12009-11-17 18:02:06 +01005680 raw_spin_lock(&child_ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005681 child->perf_event_ctxp = NULL;
5682 /*
5683 * If this context is a clone; unclone it so it can't get
5684 * swapped to another process while we're removing all
5685 * the events from it.
5686 */
5687 unclone_ctx(child_ctx);
Peter Zijlstra5e942bb2009-11-23 11:37:26 +01005688 update_context_time(child_ctx);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01005689 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005690
5691 /*
5692 * Report the task dead after unscheduling the events so that we
5693 * won't get any samples after PERF_RECORD_EXIT. We can, however, still
5694 * get a few PERF_RECORD_READ events.
5695 */
5696 perf_event_task(child, child_ctx, 0);
5697
5698 /*
5699 * We can recurse on the same lock type through:
5700 *
5701 * __perf_event_exit_task()
5702 * sync_child_event()
5703 * fput(parent_event->filp)
5704 * perf_release()
5705 * mutex_lock(&ctx->mutex)
5706 *
5707 * But since it's the parent context, it won't be the same instance.
5708 */
Peter Zijlstraa0507c82010-05-06 15:42:53 +02005709 mutex_lock(&child_ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005710
5711again:
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005712 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5713 group_entry)
5714 __perf_event_exit_task(child_event, child_ctx, child);
5715
5716 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005717 group_entry)
5718 __perf_event_exit_task(child_event, child_ctx, child);
5719
5720 /*
5721 * If the last event was a group event, it will have appended all
5722 * its siblings to the list, but we obtained 'tmp' before that, so it
5723 * still points to the list head that terminates the iteration.
5724 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005725 if (!list_empty(&child_ctx->pinned_groups) ||
5726 !list_empty(&child_ctx->flexible_groups))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005727 goto again;
5728
5729 mutex_unlock(&child_ctx->mutex);
5730
5731 put_ctx(child_ctx);
5732}
5733
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005734static void perf_free_event(struct perf_event *event,
5735 struct perf_event_context *ctx)
5736{
5737 struct perf_event *parent = event->parent;
5738
5739 if (WARN_ON_ONCE(!parent))
5740 return;
5741
5742 mutex_lock(&parent->child_mutex);
5743 list_del_init(&event->child_list);
5744 mutex_unlock(&parent->child_mutex);
5745
5746 fput(parent->filp);
5747
Peter Zijlstra8a495422010-05-27 15:47:49 +02005748 perf_group_detach(event);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005749 list_del_event(event, ctx);
5750 free_event(event);
5751}
5752
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005753/*
5754 * free an unexposed, unused context as created by inheritance by
5755 * init_task below, used by fork() in case of failure.
5756 */
5757void perf_event_free_task(struct task_struct *task)
5758{
5759 struct perf_event_context *ctx = task->perf_event_ctxp;
5760 struct perf_event *event, *tmp;
5761
5762 if (!ctx)
5763 return;
5764
5765 mutex_lock(&ctx->mutex);
5766again:
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005767 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5768 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005769
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005770 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5771 group_entry)
5772 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005773
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005774 if (!list_empty(&ctx->pinned_groups) ||
5775 !list_empty(&ctx->flexible_groups))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005776 goto again;
5777
5778 mutex_unlock(&ctx->mutex);
5779
5780 put_ctx(ctx);
5781}
5782
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005783static int
5784inherit_task_group(struct perf_event *event, struct task_struct *parent,
5785 struct perf_event_context *parent_ctx,
5786 struct task_struct *child,
5787 int *inherited_all)
5788{
5789 int ret;
5790 struct perf_event_context *child_ctx = child->perf_event_ctxp;
5791
5792 if (!event->attr.inherit) {
5793 *inherited_all = 0;
5794 return 0;
5795 }
5796
5797 if (!child_ctx) {
5798 /*
5799 * This is executed from the parent task context, so
5800 * inherit events that have been marked for cloning.
5801 * First allocate and initialize a context for the
5802 * child.
5803 */
5804
5805 child_ctx = kzalloc(sizeof(struct perf_event_context),
5806 GFP_KERNEL);
5807 if (!child_ctx)
5808 return -ENOMEM;
5809
5810 __perf_event_init_context(child_ctx, child);
5811 child->perf_event_ctxp = child_ctx;
5812 get_task_struct(child);
5813 }
5814
5815 ret = inherit_group(event, parent, parent_ctx,
5816 child, child_ctx);
5817
5818 if (ret)
5819 *inherited_all = 0;
5820
5821 return ret;
5822}
5823
5824
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005825/*
5826 * Initialize the perf_event context in task_struct
5827 */
5828int perf_event_init_task(struct task_struct *child)
5829{
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005830 struct perf_event_context *child_ctx, *parent_ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005831 struct perf_event_context *cloned_ctx;
5832 struct perf_event *event;
5833 struct task_struct *parent = current;
5834 int inherited_all = 1;
5835 int ret = 0;
5836
5837 child->perf_event_ctxp = NULL;
5838
5839 mutex_init(&child->perf_event_mutex);
5840 INIT_LIST_HEAD(&child->perf_event_list);
5841
5842 if (likely(!parent->perf_event_ctxp))
5843 return 0;
5844
5845 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005846 * If the parent's context is a clone, pin it so it won't get
5847 * swapped under us.
5848 */
5849 parent_ctx = perf_pin_task_context(parent);
5850
5851 /*
5852 * No need to check if parent_ctx != NULL here; since we saw
5853 * it non-NULL earlier, the only reason for it to become NULL
5854 * is if we exit, and since we're currently in the middle of
5855 * a fork we can't be exiting at the same time.
5856 */
5857
5858 /*
5859 * Lock the parent list. No need to lock the child - not PID
5860 * hashed yet and not running, so nobody can access it.
5861 */
5862 mutex_lock(&parent_ctx->mutex);
5863
5864 /*
5865 * We don't have to disable NMIs - we are only looking at
5866 * the list, not manipulating it:
5867 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005868 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
5869 ret = inherit_task_group(event, parent, parent_ctx, child,
5870 &inherited_all);
5871 if (ret)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005872 break;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005873 }
5874
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005875 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
5876 ret = inherit_task_group(event, parent, parent_ctx, child,
5877 &inherited_all);
5878 if (ret)
5879 break;
5880 }
5881
5882 child_ctx = child->perf_event_ctxp;
5883
Peter Zijlstra05cbaa22009-12-30 16:00:35 +01005884 if (child_ctx && inherited_all) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005885 /*
5886 * Mark the child context as a clone of the parent
5887 * context, or of whatever the parent is a clone of.
5888 * Note that if the parent is a clone, it could get
5889 * uncloned at any point, but that doesn't matter
5890 * because the list of events and the generation
5891 * count can't have changed since we took the mutex.
5892 */
5893 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
5894 if (cloned_ctx) {
5895 child_ctx->parent_ctx = cloned_ctx;
5896 child_ctx->parent_gen = parent_ctx->parent_gen;
5897 } else {
5898 child_ctx->parent_ctx = parent_ctx;
5899 child_ctx->parent_gen = parent_ctx->generation;
5900 }
5901 get_ctx(child_ctx->parent_ctx);
5902 }
5903
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005904 mutex_unlock(&parent_ctx->mutex);
5905
5906 perf_unpin_context(parent_ctx);
5907
5908 return ret;
5909}
5910
Paul Mackerras220b1402010-03-10 20:45:52 +11005911static void __init perf_event_init_all_cpus(void)
5912{
5913 int cpu;
5914 struct perf_cpu_context *cpuctx;
5915
5916 for_each_possible_cpu(cpu) {
5917 cpuctx = &per_cpu(perf_cpu_context, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005918 mutex_init(&cpuctx->hlist_mutex);
Paul Mackerras220b1402010-03-10 20:45:52 +11005919 __perf_event_init_context(&cpuctx->ctx, NULL);
5920 }
5921}
5922
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005923static void __cpuinit perf_event_init_cpu(int cpu)
5924{
5925 struct perf_cpu_context *cpuctx;
5926
5927 cpuctx = &per_cpu(perf_cpu_context, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005928
5929 spin_lock(&perf_resource_lock);
5930 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
5931 spin_unlock(&perf_resource_lock);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005932
5933 mutex_lock(&cpuctx->hlist_mutex);
5934 if (cpuctx->hlist_refcount > 0) {
5935 struct swevent_hlist *hlist;
5936
5937 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5938 WARN_ON_ONCE(!hlist);
5939 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
5940 }
5941 mutex_unlock(&cpuctx->hlist_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005942}
5943
5944#ifdef CONFIG_HOTPLUG_CPU
5945static void __perf_event_exit_cpu(void *info)
5946{
5947 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
5948 struct perf_event_context *ctx = &cpuctx->ctx;
5949 struct perf_event *event, *tmp;
5950
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005951 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5952 __perf_event_remove_from_context(event);
5953 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005954 __perf_event_remove_from_context(event);
5955}
5956static void perf_event_exit_cpu(int cpu)
5957{
5958 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
5959 struct perf_event_context *ctx = &cpuctx->ctx;
5960
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005961 mutex_lock(&cpuctx->hlist_mutex);
5962 swevent_hlist_release(cpuctx);
5963 mutex_unlock(&cpuctx->hlist_mutex);
5964
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005965 mutex_lock(&ctx->mutex);
5966 smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
5967 mutex_unlock(&ctx->mutex);
5968}
5969#else
5970static inline void perf_event_exit_cpu(int cpu) { }
5971#endif
5972
5973static int __cpuinit
5974perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5975{
5976 unsigned int cpu = (long)hcpu;
5977
Peter Zijlstra5e116372010-06-11 13:35:08 +02005978 switch (action & ~CPU_TASKS_FROZEN) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005979
5980 case CPU_UP_PREPARE:
Peter Zijlstra5e116372010-06-11 13:35:08 +02005981 case CPU_DOWN_FAILED:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005982 perf_event_init_cpu(cpu);
5983 break;
5984
Peter Zijlstra5e116372010-06-11 13:35:08 +02005985 case CPU_UP_CANCELED:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005986 case CPU_DOWN_PREPARE:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005987 perf_event_exit_cpu(cpu);
5988 break;
5989
5990 default:
5991 break;
5992 }
5993
5994 return NOTIFY_OK;
5995}
5996
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005997void __init perf_event_init(void)
5998{
Paul Mackerras220b1402010-03-10 20:45:52 +11005999 perf_event_init_all_cpus();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02006000 init_srcu_struct(&pmus_srcu);
6001 perf_pmu_register(&perf_swevent);
6002 perf_pmu_register(&perf_cpu_clock);
6003 perf_pmu_register(&perf_task_clock);
6004 perf_tp_register();
6005 perf_cpu_notifier(perf_cpu_notify);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006006}
6007
Andi Kleenc9be0a32010-01-05 12:47:58 +01006008static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
6009 struct sysdev_class_attribute *attr,
6010 char *buf)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006011{
6012 return sprintf(buf, "%d\n", perf_reserved_percpu);
6013}
6014
6015static ssize_t
6016perf_set_reserve_percpu(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01006017 struct sysdev_class_attribute *attr,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006018 const char *buf,
6019 size_t count)
6020{
6021 struct perf_cpu_context *cpuctx;
6022 unsigned long val;
6023 int err, cpu, mpt;
6024
6025 err = strict_strtoul(buf, 10, &val);
6026 if (err)
6027 return err;
6028 if (val > perf_max_events)
6029 return -EINVAL;
6030
6031 spin_lock(&perf_resource_lock);
6032 perf_reserved_percpu = val;
6033 for_each_online_cpu(cpu) {
6034 cpuctx = &per_cpu(perf_cpu_context, cpu);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01006035 raw_spin_lock_irq(&cpuctx->ctx.lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006036 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
6037 perf_max_events - perf_reserved_percpu);
6038 cpuctx->max_pertask = mpt;
Thomas Gleixnere625cce12009-11-17 18:02:06 +01006039 raw_spin_unlock_irq(&cpuctx->ctx.lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006040 }
6041 spin_unlock(&perf_resource_lock);
6042
6043 return count;
6044}
6045
Andi Kleenc9be0a32010-01-05 12:47:58 +01006046static ssize_t perf_show_overcommit(struct sysdev_class *class,
6047 struct sysdev_class_attribute *attr,
6048 char *buf)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006049{
6050 return sprintf(buf, "%d\n", perf_overcommit);
6051}
6052
6053static ssize_t
Andi Kleenc9be0a32010-01-05 12:47:58 +01006054perf_set_overcommit(struct sysdev_class *class,
6055 struct sysdev_class_attribute *attr,
6056 const char *buf, size_t count)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006057{
6058 unsigned long val;
6059 int err;
6060
6061 err = strict_strtoul(buf, 10, &val);
6062 if (err)
6063 return err;
6064 if (val > 1)
6065 return -EINVAL;
6066
6067 spin_lock(&perf_resource_lock);
6068 perf_overcommit = val;
6069 spin_unlock(&perf_resource_lock);
6070
6071 return count;
6072}
6073
6074static SYSDEV_CLASS_ATTR(
6075 reserve_percpu,
6076 0644,
6077 perf_show_reserve_percpu,
6078 perf_set_reserve_percpu
6079 );
6080
6081static SYSDEV_CLASS_ATTR(
6082 overcommit,
6083 0644,
6084 perf_show_overcommit,
6085 perf_set_overcommit
6086 );
6087
6088static struct attribute *perfclass_attrs[] = {
6089 &attr_reserve_percpu.attr,
6090 &attr_overcommit.attr,
6091 NULL
6092};
6093
6094static struct attribute_group perfclass_attr_group = {
6095 .attrs = perfclass_attrs,
6096 .name = "perf_events",
6097};
6098
6099static int __init perf_event_sysfs_init(void)
6100{
6101 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
6102 &perfclass_attr_group);
6103}
6104device_initcall(perf_event_sysfs_init);
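/*
 * Editor's note (path is an assumption, not taken from this file): the
 * attribute group registered above should show up under the cpu sysdev
 * class, i.e. as /sys/devices/system/cpu/perf_events/reserve_percpu and
 * /sys/devices/system/cpu/perf_events/overcommit, both root-writable.
 */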