blob: e52784b7b84492b9fb47756db0d62b92a22967ac [file] [log] [blame]
/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
7
#include <linux/module.h>
#include "trace.h"
10
/*
 * We can't use a size but a type in alloc_percpu()
 * So let's create a dummy type that matches the desired size
 */
typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;

/*
 * Per-cpu scratch buffer that profiled events write their records into.
 * Published/retired with rcu_assign_pointer() and read under RCU by the
 * event profiling fast path (readers live outside this file).
 */
char *trace_profile_buf;
EXPORT_SYMBOL_GPL(trace_profile_buf);

/*
 * Second per-cpu buffer, presumably reserved for events fired from NMI
 * context so they don't clobber an in-flight record in trace_profile_buf
 * — readers are outside this file; confirm against the users.
 */
char *trace_profile_buf_nmi;
EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;
Frederic Weisbeckere5e25cf2009-09-18 00:54:43 +020026static int ftrace_profile_enable_event(struct ftrace_event_call *event)
27{
Frederic Weisbecker20ab44252009-09-18 06:10:28 +020028 char *buf;
29 int ret = -ENOMEM;
30
Frederic Weisbeckere5e25cf2009-09-18 00:54:43 +020031 if (atomic_inc_return(&event->profile_count))
32 return 0;
33
Frederic Weisbeckerfe8e5b52009-10-03 14:55:18 +020034 if (!total_profile_count) {
Frederic Weisbecker20ab44252009-09-18 06:10:28 +020035 buf = (char *)alloc_percpu(profile_buf_t);
36 if (!buf)
37 goto fail_buf;
38
39 rcu_assign_pointer(trace_profile_buf, buf);
40
41 buf = (char *)alloc_percpu(profile_buf_t);
42 if (!buf)
43 goto fail_buf_nmi;
44
45 rcu_assign_pointer(trace_profile_buf_nmi, buf);
46 }
47
48 ret = event->profile_enable();
Frederic Weisbeckerfe8e5b52009-10-03 14:55:18 +020049 if (!ret) {
50 total_profile_count++;
Frederic Weisbecker20ab44252009-09-18 06:10:28 +020051 return 0;
Frederic Weisbeckerfe8e5b52009-10-03 14:55:18 +020052 }
Frederic Weisbecker20ab44252009-09-18 06:10:28 +020053
Frederic Weisbecker20ab44252009-09-18 06:10:28 +020054fail_buf_nmi:
Frederic Weisbeckerfe8e5b52009-10-03 14:55:18 +020055 if (!total_profile_count) {
56 kfree(trace_profile_buf_nmi);
57 kfree(trace_profile_buf);
58 trace_profile_buf_nmi = NULL;
59 trace_profile_buf = NULL;
60 }
Frederic Weisbecker20ab44252009-09-18 06:10:28 +020061fail_buf:
Frederic Weisbecker20ab44252009-09-18 06:10:28 +020062 atomic_dec(&event->profile_count);
63
64 return ret;
Frederic Weisbeckere5e25cf2009-09-18 00:54:43 +020065}
66
Peter Zijlstraac199db2009-03-19 20:26:15 +010067int ftrace_profile_enable(int event_id)
68{
69 struct ftrace_event_call *event;
Li Zefan20c89282009-05-06 10:33:45 +080070 int ret = -EINVAL;
Peter Zijlstraac199db2009-03-19 20:26:15 +010071
Li Zefan20c89282009-05-06 10:33:45 +080072 mutex_lock(&event_mutex);
Steven Rostedta59fd602009-04-10 13:52:20 -040073 list_for_each_entry(event, &ftrace_events, list) {
Li Zefan558e6542009-08-24 12:19:47 +080074 if (event->id == event_id && event->profile_enable &&
75 try_module_get(event->mod)) {
Frederic Weisbeckere5e25cf2009-09-18 00:54:43 +020076 ret = ftrace_profile_enable_event(event);
Li Zefan20c89282009-05-06 10:33:45 +080077 break;
78 }
Peter Zijlstraac199db2009-03-19 20:26:15 +010079 }
Li Zefan20c89282009-05-06 10:33:45 +080080 mutex_unlock(&event_mutex);
Peter Zijlstraac199db2009-03-19 20:26:15 +010081
Li Zefan20c89282009-05-06 10:33:45 +080082 return ret;
Peter Zijlstraac199db2009-03-19 20:26:15 +010083}
84
/*
 * Drop one profiling reference on @event; when the last reference on the
 * last profiled event goes away, tear down the shared per-cpu buffers.
 *
 * Caller must hold event_mutex.  The unpublish -> synchronize_sched()
 * -> free ordering below is load-bearing: do not reorder it.
 */
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	/* Only act when this was the event's last reference (count hits -1). */
	if (!atomic_add_negative(-1, &event->profile_count))
		return;

	event->profile_disable();

	if (!--total_profile_count) {
		/* Last profiled event anywhere: retire the buffers. */
		buf = trace_profile_buf;
		rcu_assign_pointer(trace_profile_buf, NULL);

		nmi_buf = trace_profile_buf_nmi;
		rcu_assign_pointer(trace_profile_buf_nmi, NULL);

		/*
		 * Ensure every events in profiling have finished before
		 * releasing the buffers
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}
111
Peter Zijlstraac199db2009-03-19 20:26:15 +0100112void ftrace_profile_disable(int event_id)
113{
114 struct ftrace_event_call *event;
115
Li Zefan20c89282009-05-06 10:33:45 +0800116 mutex_lock(&event_mutex);
Steven Rostedta59fd602009-04-10 13:52:20 -0400117 list_for_each_entry(event, &ftrace_events, list) {
Li Zefan20c89282009-05-06 10:33:45 +0800118 if (event->id == event_id) {
Frederic Weisbeckere5e25cf2009-09-18 00:54:43 +0200119 ftrace_profile_disable_event(event);
Li Zefan558e6542009-08-24 12:19:47 +0800120 module_put(event->mod);
Li Zefan20c89282009-05-06 10:33:45 +0800121 break;
122 }
Peter Zijlstraac199db2009-03-19 20:26:15 +0100123 }
Li Zefan20c89282009-05-06 10:33:45 +0800124 mutex_unlock(&event_mutex);
Peter Zijlstraac199db2009-03-19 20:26:15 +0100125}