blob: da14432806c6e1662e536c2c18c21763094a5329 [file] [log] [blame]
/*
 * Copyright 2010 ARM Ltd.
 *
 * Perf-events backend for OProfile.
 */
6#include <linux/perf_event.h>
Anand Gadiyar277dd982010-10-14 11:31:43 -04007#include <linux/platform_device.h>
Matt Fleming3d90a002010-09-27 20:45:08 +01008#include <linux/oprofile.h>
9#include <linux/slab.h>
10
/*
 * Per performance monitor configuration as set via oprofilefs.
 */
struct op_counter_config {
	unsigned long count;		/* sample period; copied to attr.sample_period */
	unsigned long enabled;		/* non-zero: create a perf event for this counter */
	unsigned long event;		/* raw event number; copied to attr.config */
	unsigned long unit_mask;	/* exposed via oprofilefs; not consumed in this file */
	unsigned long kernel;		/* exposed via oprofilefs; not consumed in this file */
	unsigned long user;		/* exposed via oprofilefs; not consumed in this file */
	struct perf_event_attr attr;	/* perf attribute built from the fields above */
};
23
/* Non-zero while a profiling run is active; guarded by oprofile_perf_mutex. */
static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex);

/* Per-counter configuration array, allocated in oprofile_perf_init(). */
static struct op_counter_config *counter_config;
/* Per-cpu arrays of event pointers, indexed [cpu][counter index]. */
static struct perf_event **perf_events[nr_cpumask_bits];
static int num_counters;
30
31/*
32 * Overflow callback for oprofile.
33 */
Will Deacon7fcfd1a2011-07-08 18:34:42 +010034static void op_overflow_handler(struct perf_event *event,
Matt Fleming3d90a002010-09-27 20:45:08 +010035 struct perf_sample_data *data, struct pt_regs *regs)
36{
37 int id;
38 u32 cpu = smp_processor_id();
39
40 for (id = 0; id < num_counters; ++id)
41 if (perf_events[cpu][id] == event)
42 break;
43
44 if (id != num_counters)
45 oprofile_add_sample(regs, id);
46 else
47 pr_warning("oprofile: ignoring spurious overflow "
48 "on cpu %u\n", cpu);
49}
50
51/*
52 * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
53 * settings in counter_config. Attributes are created as `pinned' events and
54 * so are permanently scheduled on the PMU.
55 */
56static void op_perf_setup(void)
57{
58 int i;
59 u32 size = sizeof(struct perf_event_attr);
60 struct perf_event_attr *attr;
61
62 for (i = 0; i < num_counters; ++i) {
63 attr = &counter_config[i].attr;
64 memset(attr, 0, size);
65 attr->type = PERF_TYPE_RAW;
66 attr->size = size;
67 attr->config = counter_config[i].event;
68 attr->sample_period = counter_config[i].count;
69 attr->pinned = 1;
70 }
71}
72
73static int op_create_counter(int cpu, int event)
74{
Matt Fleming3d90a002010-09-27 20:45:08 +010075 struct perf_event *pevent;
76
Robert Richter2bcb2b62010-09-29 14:43:29 +020077 if (!counter_config[event].enabled || perf_events[cpu][event])
78 return 0;
Matt Fleming3d90a002010-09-27 20:45:08 +010079
80 pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
Robert Richter62684642010-10-15 12:45:00 +020081 cpu, NULL,
Avi Kivity4dc0da82011-06-29 18:42:35 +030082 op_overflow_handler, NULL);
Matt Fleming3d90a002010-09-27 20:45:08 +010083
Robert Richter2bcb2b62010-09-29 14:43:29 +020084 if (IS_ERR(pevent))
85 return PTR_ERR(pevent);
86
87 if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
Robert Richter81771972010-09-29 16:52:25 +020088 perf_event_release_kernel(pevent);
Matt Fleming3d90a002010-09-27 20:45:08 +010089 pr_warning("oprofile: failed to enable event %d "
90 "on CPU %d\n", event, cpu);
Robert Richter2bcb2b62010-09-29 14:43:29 +020091 return -EBUSY;
Matt Fleming3d90a002010-09-27 20:45:08 +010092 }
93
Robert Richter2bcb2b62010-09-29 14:43:29 +020094 perf_events[cpu][event] = pevent;
95
96 return 0;
Matt Fleming3d90a002010-09-27 20:45:08 +010097}
98
99static void op_destroy_counter(int cpu, int event)
100{
101 struct perf_event *pevent = perf_events[cpu][event];
102
103 if (pevent) {
104 perf_event_release_kernel(pevent);
105 perf_events[cpu][event] = NULL;
106 }
107}
108
109/*
110 * Called by oprofile_perf_start to create active perf events based on the
111 * perviously configured attributes.
112 */
113static int op_perf_start(void)
114{
115 int cpu, event, ret = 0;
116
117 for_each_online_cpu(cpu) {
118 for (event = 0; event < num_counters; ++event) {
119 ret = op_create_counter(cpu, event);
120 if (ret)
Robert Richter9c912832010-08-27 14:32:41 +0200121 return ret;
Matt Fleming3d90a002010-09-27 20:45:08 +0100122 }
123 }
124
Matt Fleming3d90a002010-09-27 20:45:08 +0100125 return ret;
126}
127
128/*
129 * Called by oprofile_perf_stop at the end of a profiling run.
130 */
131static void op_perf_stop(void)
132{
133 int cpu, event;
134
135 for_each_online_cpu(cpu)
136 for (event = 0; event < num_counters; ++event)
137 op_destroy_counter(cpu, event);
138}
139
140static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
141{
142 unsigned int i;
143
144 for (i = 0; i < num_counters; i++) {
145 struct dentry *dir;
146 char buf[4];
147
148 snprintf(buf, sizeof buf, "%d", i);
149 dir = oprofilefs_mkdir(sb, root, buf);
150 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
151 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
152 oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
153 oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
154 oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
155 oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
156 }
157
158 return 0;
159}
160
/*
 * oprofile `setup' hook: rebuild the perf attributes from the current
 * oprofilefs settings. oprofilefs_lock serializes against concurrent
 * updates of the counter_config fields via oprofilefs writes.
 */
static int oprofile_perf_setup(void)
{
	raw_spin_lock(&oprofilefs_lock);
	op_perf_setup();
	raw_spin_unlock(&oprofilefs_lock);
	return 0;
}
168
169static int oprofile_perf_start(void)
170{
171 int ret = -EBUSY;
172
173 mutex_lock(&oprofile_perf_mutex);
174 if (!oprofile_perf_enabled) {
175 ret = 0;
176 op_perf_start();
177 oprofile_perf_enabled = 1;
178 }
179 mutex_unlock(&oprofile_perf_mutex);
180 return ret;
181}
182
183static void oprofile_perf_stop(void)
184{
185 mutex_lock(&oprofile_perf_mutex);
186 if (oprofile_perf_enabled)
187 op_perf_stop();
188 oprofile_perf_enabled = 0;
189 mutex_unlock(&oprofile_perf_mutex);
190}
191
#ifdef CONFIG_PM

/*
 * Release all counters across a suspend; the enabled flag is left set so
 * oprofile_perf_resume() knows to recreate them.
 */
static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled)
		op_perf_stop();
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}
202
/*
 * Recreate the counters released by oprofile_perf_suspend(). If they
 * cannot be restarted, mark profiling disabled rather than pretending
 * the run is still active.
 */
static int oprofile_perf_resume(struct platform_device *dev)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled && op_perf_start())
		oprofile_perf_enabled = 0;
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}
211
/* Platform driver exists solely to receive suspend/resume callbacks. */
static struct platform_driver oprofile_driver = {
	.driver		= {
		.name		= "oprofile-perf",
	},
	.resume		= oprofile_perf_resume,
	.suspend	= oprofile_perf_suspend,
};

/* Dummy device bound to oprofile_driver; created in init_driverfs(). */
static struct platform_device *oprofile_pdev;
221
222static int __init init_driverfs(void)
223{
224 int ret;
225
226 ret = platform_driver_register(&oprofile_driver);
227 if (ret)
Robert Richter9c912832010-08-27 14:32:41 +0200228 return ret;
Matt Fleming3d90a002010-09-27 20:45:08 +0100229
230 oprofile_pdev = platform_device_register_simple(
231 oprofile_driver.driver.name, 0, NULL, 0);
232 if (IS_ERR(oprofile_pdev)) {
233 ret = PTR_ERR(oprofile_pdev);
234 platform_driver_unregister(&oprofile_driver);
235 }
236
Matt Fleming3d90a002010-09-27 20:45:08 +0100237 return ret;
238}
239
/* Tear down the PM device/driver pair, in reverse order of creation. */
static void exit_driverfs(void)
{
	platform_device_unregister(oprofile_pdev);
	platform_driver_unregister(&oprofile_driver);
}
Robert Richtercd254f22010-10-15 11:28:07 +0200245
Matt Fleming3d90a002010-09-27 20:45:08 +0100246#else
Robert Richtercd254f22010-10-15 11:28:07 +0200247
248static inline int init_driverfs(void) { return 0; }
249static inline void exit_driverfs(void) { }
250
Matt Fleming3d90a002010-09-27 20:45:08 +0100251#endif /* CONFIG_PM */
252
Robert Richtere9677b32010-09-29 15:42:30 +0200253void oprofile_perf_exit(void)
254{
255 int cpu, id;
256 struct perf_event *event;
257
258 for_each_possible_cpu(cpu) {
259 for (id = 0; id < num_counters; ++id) {
260 event = perf_events[cpu][id];
261 if (event)
262 perf_event_release_kernel(event);
263 }
264
265 kfree(perf_events[cpu]);
266 }
267
268 kfree(counter_config);
269 exit_driverfs();
270}
271
/*
 * Initialise the perf-events backend and fill in the oprofile operations.
 * On any failure after init_driverfs() succeeds, oprofile_perf_exit()
 * unwinds everything allocated so far.
 */
int __init oprofile_perf_init(struct oprofile_operations *ops)
{
	int cpu, ret = 0;

	ret = init_driverfs();
	if (ret)
		return ret;

	memset(&perf_events, 0, sizeof(perf_events));

	num_counters = perf_num_counters();
	if (num_counters <= 0) {
		pr_info("oprofile: no performance counters\n");
		ret = -ENODEV;
		goto out;
	}

	counter_config = kcalloc(num_counters,
			sizeof(struct op_counter_config), GFP_KERNEL);

	if (!counter_config) {
		pr_info("oprofile: failed to allocate %d "
				"counters\n", num_counters);
		ret = -ENOMEM;
		/* Keep the exit path from walking per-cpu arrays that were
		 * never allocated. */
		num_counters = 0;
		goto out;
	}

	for_each_possible_cpu(cpu) {
		perf_events[cpu] = kcalloc(num_counters,
				sizeof(struct perf_event *), GFP_KERNEL);
		if (!perf_events[cpu]) {
			pr_info("oprofile: failed to allocate %d perf events "
					"for cpu %d\n", num_counters, cpu);
			ret = -ENOMEM;
			goto out;
		}
	}

	ops->create_files = oprofile_perf_create_files;
	ops->setup = oprofile_perf_setup;
	ops->start = oprofile_perf_start;
	ops->stop = oprofile_perf_stop;
	ops->shutdown = oprofile_perf_stop;
	ops->cpu_type = op_name_from_perf_id();

	if (!ops->cpu_type)
		ret = -ENODEV;
	else
		pr_info("oprofile: using %s\n", ops->cpu_type);

out:
	/* Single unwind point: oprofile_perf_exit() frees whatever exists. */
	if (ret)
		oprofile_perf_exit();

	return ret;
}