/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

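/*
 * Illustrative note (an assumption, not taken from this file): an
 * architecture whose watchpoints can consume more than one debug
 * register may override the weak hw_breakpoint_weight() above to
 * return, say, 2 for a double-width watchpoint.  All of the slot
 * accounting below scales each breakpoint by this weight.
 */
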
static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.bp_target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.bp_target == tsk &&
		    find_slot_idx(iter) == type &&
		    cpu == iter->cpu)
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu >= 0) or in all of them (cpu == -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->hw.bp_target;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu, type);
		else
			slots->pinned += task_bp_pinned(cpu, bp, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned;
	int old_count = 0;
	int old_idx = 0;
	int idx = 0;

	old_count = task_bp_pinned(cpu, bp, type);
	old_idx = old_count - 1;
	idx = old_idx + weight;

	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}

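/*
 * Worked example (illustrative, not from the original source): suppose a
 * task already holds two data breakpoints on this cpu and registers a
 * third one of weight 1.  Then old_count = 2, old_idx = 1 and idx = 2,
 * so enabling decrements tsk_pinned[1] and increments tsk_pinned[2]:
 * the task moves from the "has 2 breakpoints" bucket to the "has 3
 * breakpoints" bucket, and max_task_bp_pinned() will now report 3.
 */
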
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->hw.bp_target;

	/* Pinned counter cpu profiling */
	if (!tsk) {
		if (enable)
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
		else
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
		return;
	}

	/* Pinned counter task profiling */

	if (!enable)
		list_del(&bp->hw.bp_list);

	if (cpu >= 0) {
		toggle_bp_task_slot(bp, cpu, enable, type, weight);
	} else {
		for_each_online_cpu(cpu)
			toggle_bp_task_slot(bp, cpu, enable, type, weight);
	}

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover all the registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
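/*
 * Worked example (illustrative numbers, assuming x86 where
 * nr_slots[TYPE_DATA] == HBP_NUM == 4): a cpu already has two pinned
 * cpu-wide breakpoints and its busiest task pins one more.  A new
 * cpu-wide breakpoint of weight 1 still fits: 2 + 1 + 1 <= 4.  If a
 * flexible counter is also present on that cpu, it must keep one slot
 * for itself, so the same request is refused: 3 + 1 + 1 > 4 -> -ENOSPC.
 */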
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data that can be used in the triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

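/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * such as the ptrace code might set up a 4-byte write watchpoint on a
 * traced task.  The names example_register_user_bp and my_bp_handler are
 * hypothetical; hw_breakpoint_init() comes from <linux/hw_breakpoint.h>.
 */
static inline struct perf_event *
example_register_user_bp(struct task_struct *tsk, unsigned long addr,
			 perf_overflow_handler_t my_bp_handler)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);	/* PERF_TYPE_BREAKPOINT, pinned */
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, my_bp_handler, NULL, tsk);
}
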
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		__perf_event_disable(bp);
	else
		perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

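/*
 * Illustrative usage sketch (hypothetical helper, not in the original):
 * moving an existing breakpoint to a new address while keeping its type
 * and length.  On validation failure the old settings are restored by
 * modify_user_hw_breakpoint() itself.
 */
static inline int example_move_user_bp(struct perf_event *bp,
				       unsigned long new_addr)
{
	struct perf_event_attr attr = bp->attr;

	attr.bp_addr = new_addr;
	return modify_user_hw_breakpoint(bp, &attr);
}
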
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data that can be used in the triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

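/*
 * Illustrative usage sketch (modeled on samples/hw_breakpoint/, an
 * assumption rather than part of this file): a kernel-wide read/write
 * watchpoint on a kernel symbol.  wide_bp_handler and the chosen
 * symbol are hypothetical.
 */
static void wide_bp_handler(struct perf_event *bp,
			    struct perf_sample_data *data,
			    struct pt_regs *regs)
{
	printk(KERN_INFO "watched kernel symbol was accessed\n");
}

static struct perf_event * __percpu *example_register_wide_bp(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = kallsyms_lookup_name("pid_max");	/* example symbol */
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;

	return register_wide_hw_breakpoint(&attr, wide_bp_handler, NULL);
}
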
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static int hw_breakpoint_event_idx(struct perf_event *bp)
{
	return 0;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,

	.event_idx	= hw_breakpoint_event_idx,
};

int __init init_hw_breakpoint(void)
{
	unsigned int **task_bp_pinned;
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}