/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

/* Per-cpu scratch buffers: one for ordinary contexts, one for NMIs */
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

/* Sized type so that alloc_percpu() reserves FTRACE_MAX_PROFILE_SIZE bytes per cpu */
typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;

static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->profile_count++ > 0)
		return 0;

	/* The first profiled event allocates and publishes the buffers */
	if (!total_profile_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->profile_enable(event);
	if (!ret) {
		total_profile_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_profile_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->profile_count--;

	return ret;
}

int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->profile_count > 0)
		return;

	event->profile_disable(event);

	if (!--total_profile_count) {
		/* Unpublish the buffers before freeing them */
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure all events in profiling have finished before
		 * releasing the buffers. Readers dereference them with
		 * irqs disabled, so synchronize_sched() waits for every
		 * in-flight user.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}

void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}
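
/*
 * For reference, a sketch of the consumer side (not part of this file):
 * perf attaches a tracepoint counter to the two entry points above from
 * the tracepoint glue in kernel/perf_event.c, roughly:
 *
 *	if (ftrace_profile_enable(event->attr.config))
 *		return NULL;
 *	event->destroy = tp_perf_event_destroy;
 *
 * where the destroy callback ends up calling ftrace_profile_disable().
 * The hook names here are recalled from that glue code and should be
 * treated as illustrative rather than definitive.
 */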

__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
					int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* Zero the dead bytes from alignment so we don't leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
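
/*
 * Typical usage, a sketch of what a TRACE_EVENT()-generated probe does
 * (ftrace_perf_buf_submit() is the pairing helper declared next to the
 * prototype of the prepare function above; the local variable names and
 * the <...> placeholders below are illustrative):
 *
 *	struct ftrace_raw_<call> *entry;
 *	unsigned long irq_flags;
 *	int rctx;
 *
 *	entry = ftrace_perf_buf_prepare(size, event_call->id,
 *					&rctx, &irq_flags);
 *	if (!entry)
 *		return;
 *	<assign the entry fields>
 *	ftrace_perf_buf_submit(entry, size, rctx, addr, count, irq_flags);
 *
 * The submit side hands the record to perf, then releases the recursion
 * context taken in prepare and restores irq_flags, in that order.
 */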