blob: 4b150a754890887a4f2c5751e90d7842a12d6a7a [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2010 ARM Ltd.
 * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
 *
 * Perf-events backend for OProfile.
 */
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/oprofile.h>
#include <linux/slab.h>

/*
 * Per performance monitor configuration as set via oprofilefs.
 */
struct op_counter_config {
	unsigned long count;		/* sample period, copied to attr.sample_period */
	unsigned long enabled;		/* non-zero: op_create_counter() will back this slot */
	unsigned long event;		/* raw event number, copied to attr.config */
	unsigned long unit_mask;	/* exposed via oprofilefs; not copied into attr here */
	unsigned long kernel;		/* exposed via oprofilefs; not copied into attr here */
	unsigned long user;		/* exposed via oprofilefs; not copied into attr here */
	struct perf_event_attr attr;	/* built by op_perf_setup() from the fields above */
};
25
/* Non-zero while a profiling run is active; guarded by oprofile_perf_mutex. */
static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex);

/* One slot per hardware counter, indexed 0..num_counters-1. */
static struct op_counter_config *counter_config;
/* Per-CPU array (kcalloc'd in oprofile_perf_init) of the active events. */
static DEFINE_PER_CPU(struct perf_event **, perf_events);
static int num_counters;
32
33/*
34 * Overflow callback for oprofile.
35 */
Will Deacon7fcfd1a2011-07-08 18:34:42 +010036static void op_overflow_handler(struct perf_event *event,
Matt Fleming3d90a002010-09-27 20:45:08 +010037 struct perf_sample_data *data, struct pt_regs *regs)
38{
39 int id;
40 u32 cpu = smp_processor_id();
41
42 for (id = 0; id < num_counters; ++id)
Robert Richterf8bbfd72012-02-23 17:07:06 +010043 if (per_cpu(perf_events, cpu)[id] == event)
Matt Fleming3d90a002010-09-27 20:45:08 +010044 break;
45
46 if (id != num_counters)
47 oprofile_add_sample(regs, id);
48 else
49 pr_warning("oprofile: ignoring spurious overflow "
50 "on cpu %u\n", cpu);
51}
52
53/*
54 * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
55 * settings in counter_config. Attributes are created as `pinned' events and
56 * so are permanently scheduled on the PMU.
57 */
58static void op_perf_setup(void)
59{
60 int i;
61 u32 size = sizeof(struct perf_event_attr);
62 struct perf_event_attr *attr;
63
64 for (i = 0; i < num_counters; ++i) {
65 attr = &counter_config[i].attr;
66 memset(attr, 0, size);
67 attr->type = PERF_TYPE_RAW;
68 attr->size = size;
69 attr->config = counter_config[i].event;
70 attr->sample_period = counter_config[i].count;
71 attr->pinned = 1;
72 }
73}
74
75static int op_create_counter(int cpu, int event)
76{
Matt Fleming3d90a002010-09-27 20:45:08 +010077 struct perf_event *pevent;
78
Robert Richterf8bbfd72012-02-23 17:07:06 +010079 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
Robert Richter2bcb2b62010-09-29 14:43:29 +020080 return 0;
Matt Fleming3d90a002010-09-27 20:45:08 +010081
82 pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
Robert Richter62684642010-10-15 12:45:00 +020083 cpu, NULL,
Avi Kivity4dc0da82011-06-29 18:42:35 +030084 op_overflow_handler, NULL);
Matt Fleming3d90a002010-09-27 20:45:08 +010085
Robert Richter2bcb2b62010-09-29 14:43:29 +020086 if (IS_ERR(pevent))
87 return PTR_ERR(pevent);
88
89 if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
Robert Richter81771972010-09-29 16:52:25 +020090 perf_event_release_kernel(pevent);
Matt Fleming3d90a002010-09-27 20:45:08 +010091 pr_warning("oprofile: failed to enable event %d "
92 "on CPU %d\n", event, cpu);
Robert Richter2bcb2b62010-09-29 14:43:29 +020093 return -EBUSY;
Matt Fleming3d90a002010-09-27 20:45:08 +010094 }
95
Robert Richterf8bbfd72012-02-23 17:07:06 +010096 per_cpu(perf_events, cpu)[event] = pevent;
Robert Richter2bcb2b62010-09-29 14:43:29 +020097
98 return 0;
Matt Fleming3d90a002010-09-27 20:45:08 +010099}
100
101static void op_destroy_counter(int cpu, int event)
102{
Robert Richterf8bbfd72012-02-23 17:07:06 +0100103 struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
Matt Fleming3d90a002010-09-27 20:45:08 +0100104
105 if (pevent) {
106 perf_event_release_kernel(pevent);
Robert Richterf8bbfd72012-02-23 17:07:06 +0100107 per_cpu(perf_events, cpu)[event] = NULL;
Matt Fleming3d90a002010-09-27 20:45:08 +0100108 }
109}
110
111/*
112 * Called by oprofile_perf_start to create active perf events based on the
113 * perviously configured attributes.
114 */
115static int op_perf_start(void)
116{
117 int cpu, event, ret = 0;
118
119 for_each_online_cpu(cpu) {
120 for (event = 0; event < num_counters; ++event) {
121 ret = op_create_counter(cpu, event);
122 if (ret)
Robert Richter9c912832010-08-27 14:32:41 +0200123 return ret;
Matt Fleming3d90a002010-09-27 20:45:08 +0100124 }
125 }
126
Matt Fleming3d90a002010-09-27 20:45:08 +0100127 return ret;
128}
129
130/*
131 * Called by oprofile_perf_stop at the end of a profiling run.
132 */
133static void op_perf_stop(void)
134{
135 int cpu, event;
136
137 for_each_online_cpu(cpu)
138 for (event = 0; event < num_counters; ++event)
139 op_destroy_counter(cpu, event);
140}
141
Al Viroef7bca12013-07-19 15:52:42 +0400142static int oprofile_perf_create_files(struct dentry *root)
Matt Fleming3d90a002010-09-27 20:45:08 +0100143{
144 unsigned int i;
145
146 for (i = 0; i < num_counters; i++) {
147 struct dentry *dir;
148 char buf[4];
149
150 snprintf(buf, sizeof buf, "%d", i);
Al Viroecde2822013-07-19 15:58:27 +0400151 dir = oprofilefs_mkdir(root, buf);
Al Viro6af4ea02013-07-19 16:10:36 +0400152 oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
153 oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
154 oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
155 oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
156 oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
157 oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
Matt Fleming3d90a002010-09-27 20:45:08 +0100158 }
159
160 return 0;
161}
162
/* oprofile "setup" hook: rebuild the perf attrs under the oprofilefs lock. */
static int oprofile_perf_setup(void)
{
	raw_spin_lock(&oprofilefs_lock);
	op_perf_setup();
	raw_spin_unlock(&oprofilefs_lock);
	return 0;
}
170
171static int oprofile_perf_start(void)
172{
173 int ret = -EBUSY;
174
175 mutex_lock(&oprofile_perf_mutex);
176 if (!oprofile_perf_enabled) {
177 ret = 0;
178 op_perf_start();
179 oprofile_perf_enabled = 1;
180 }
181 mutex_unlock(&oprofile_perf_mutex);
182 return ret;
183}
184
/* oprofile "stop"/"shutdown" hook: tear down all events, mark disabled. */
static void oprofile_perf_stop(void)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled)
		op_perf_stop();
	oprofile_perf_enabled = 0;
	mutex_unlock(&oprofile_perf_mutex);
}
193
194#ifdef CONFIG_PM
Robert Richtercd254f22010-10-15 11:28:07 +0200195
/*
 * PM suspend: stop counting but leave oprofile_perf_enabled set, so that
 * resume knows to restart the run.
 */
static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled)
		op_perf_stop();
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}
204
/*
 * PM resume: restart a run that was active at suspend time; mark the
 * backend disabled if the events cannot be recreated.
 */
static int oprofile_perf_resume(struct platform_device *dev)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled && op_perf_start())
		oprofile_perf_enabled = 0;
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}
213
/* Dummy platform driver/device pair, registered only to receive the PM
 * suspend/resume callbacks above. */
static struct platform_driver oprofile_driver = {
	.driver		= {
		.name		= "oprofile-perf",
	},
	.resume		= oprofile_perf_resume,
	.suspend	= oprofile_perf_suspend,
};

static struct platform_device *oprofile_pdev;
223
224static int __init init_driverfs(void)
225{
226 int ret;
227
228 ret = platform_driver_register(&oprofile_driver);
229 if (ret)
Robert Richter9c912832010-08-27 14:32:41 +0200230 return ret;
Matt Fleming3d90a002010-09-27 20:45:08 +0100231
232 oprofile_pdev = platform_device_register_simple(
233 oprofile_driver.driver.name, 0, NULL, 0);
234 if (IS_ERR(oprofile_pdev)) {
235 ret = PTR_ERR(oprofile_pdev);
236 platform_driver_unregister(&oprofile_driver);
237 }
238
Matt Fleming3d90a002010-09-27 20:45:08 +0100239 return ret;
240}
241
/* Undo init_driverfs(): drop the platform device, then the driver. */
static void exit_driverfs(void)
{
	platform_device_unregister(oprofile_pdev);
	platform_driver_unregister(&oprofile_driver);
}
Robert Richtercd254f22010-10-15 11:28:07 +0200247
Matt Fleming3d90a002010-09-27 20:45:08 +0100248#else
Robert Richtercd254f22010-10-15 11:28:07 +0200249
/* Without CONFIG_PM there is nothing to register for suspend/resume. */
static inline int init_driverfs(void) { return 0; }
static inline void exit_driverfs(void) { }
252
Matt Fleming3d90a002010-09-27 20:45:08 +0100253#endif /* CONFIG_PM */
254
Robert Richtere9677b32010-09-29 15:42:30 +0200255void oprofile_perf_exit(void)
256{
257 int cpu, id;
258 struct perf_event *event;
259
260 for_each_possible_cpu(cpu) {
261 for (id = 0; id < num_counters; ++id) {
Robert Richterf8bbfd72012-02-23 17:07:06 +0100262 event = per_cpu(perf_events, cpu)[id];
Robert Richtere9677b32010-09-29 15:42:30 +0200263 if (event)
264 perf_event_release_kernel(event);
265 }
266
Robert Richterf8bbfd72012-02-23 17:07:06 +0100267 kfree(per_cpu(perf_events, cpu));
Robert Richtere9677b32010-09-29 15:42:30 +0200268 }
269
270 kfree(counter_config);
271 exit_driverfs();
272}
273
/*
 * Initialise the perf backend and fill in the oprofile_operations hooks.
 *
 * Order matters: the driverfs registration comes first, and every failure
 * after it unwinds through oprofile_perf_exit() (triggered by the ret
 * check at "out"), which frees counter_config, the per-CPU event arrays
 * and the platform driver/device.
 *
 * Returns 0 on success or a negative errno.
 */
int __init oprofile_perf_init(struct oprofile_operations *ops)
{
	int cpu, ret = 0;

	ret = init_driverfs();
	if (ret)
		return ret;

	num_counters = perf_num_counters();
	if (num_counters <= 0) {
		pr_info("oprofile: no performance counters\n");
		ret = -ENODEV;
		goto out;
	}

	counter_config = kcalloc(num_counters,
			sizeof(struct op_counter_config), GFP_KERNEL);

	if (!counter_config) {
		pr_info("oprofile: failed to allocate %d "
				"counters\n", num_counters);
		ret = -ENOMEM;
		/* keep num_counters consistent with the (absent) allocation */
		num_counters = 0;
		goto out;
	}

	for_each_possible_cpu(cpu) {
		per_cpu(perf_events, cpu) = kcalloc(num_counters,
				sizeof(struct perf_event *), GFP_KERNEL);
		if (!per_cpu(perf_events, cpu)) {
			pr_info("oprofile: failed to allocate %d perf events "
					"for cpu %d\n", num_counters, cpu);
			ret = -ENOMEM;
			goto out;
		}
	}

	ops->create_files = oprofile_perf_create_files;
	ops->setup = oprofile_perf_setup;
	ops->start = oprofile_perf_start;
	ops->stop = oprofile_perf_stop;
	ops->shutdown = oprofile_perf_stop;
	ops->cpu_type = op_name_from_perf_id();

	/* No usable CPU name means this PMU is not supported by oprofile. */
	if (!ops->cpu_type)
		ret = -ENODEV;
	else
		pr_info("oprofile: using %s\n", ops->cpu_type);

out:
	if (ret)
		oprofile_perf_exit();

	return ret;
}