/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

#include <asm/processor.h>

#ifdef CONFIG_X86
#include <asm/debugreg.h>
#endif

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/*
 * Number of pinned task breakpoints in a cpu:
 * task_bp_pinned[n] is the number of tasks that hold n + 1 pinned
 * breakpoints on this cpu
 */
static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints held by any
 * single task on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
        int i;
        unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);

        for (i = HBP_NUM - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}
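
/*
 * Illustrative example (not from the original source): with HBP_NUM == 4
 * (e.g. on x86), if two tasks each hold one pinned breakpoint on this cpu
 * and a third task holds three, then tsk_pinned == { 2, 0, 1, 0 } and
 * max_task_bp_pinned() returns 3.
 */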

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
{
        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
                slots->pinned += max_task_bp_pinned(cpu);
                slots->flexible = per_cpu(nr_bp_flexible, cpu);

                return;
        }

        for_each_online_cpu(cpu) {
                unsigned int nr;

                nr = per_cpu(nr_cpu_bp_pinned, cpu);
                nr += max_task_bp_pinned(cpu);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = per_cpu(nr_bp_flexible, cpu);

                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}
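
/*
 * Illustrative example (not from the original source): with cpu == -1,
 * each field reports the busiest cpu independently. If cpu 0 has 3 pinned
 * and no flexible breakpoints while cpu 1 has 1 pinned and 2 flexible
 * ones, the result is slots == { .pinned = 3, .flexible = 2 }.
 */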

/*
 * Add/remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
        int count = 0;
        struct perf_event *bp;
        struct perf_event_context *ctx = tsk->perf_event_ctxp;
        unsigned int *task_bp_pinned;
        struct list_head *list;
        unsigned long flags;

        if (WARN_ONCE(!ctx, "No perf context for this task"))
                return;

        list = &ctx->event_list;

        spin_lock_irqsave(&ctx->lock, flags);

        /*
         * The current breakpoint counter is not included in the list
         * at the open() callback time
         */
        list_for_each_entry(bp, list, event_entry) {
                if (bp->attr.type == PERF_TYPE_BREAKPOINT)
                        count++;
        }

        spin_unlock_irqrestore(&ctx->lock, flags);

        if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
                return;

        task_bp_pinned = per_cpu(task_bp_pinned, cpu);
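
        /*
         * task_bp_pinned[] is a histogram: slot n counts the tasks that
         * hold n + 1 pinned breakpoints on this cpu. "count" is the number
         * of breakpoints the task held before this one, so enabling moves
         * the task one bucket up and disabling moves it one bucket down.
         */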
        if (enable) {
                task_bp_pinned[count]++;
                if (count > 0)
                        task_bp_pinned[count-1]--;
        } else {
                task_bp_pinned[count]--;
                if (count > 0)
                        task_bp_pinned[count-1]++;
        }
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        /* Pinned counter task profiling */
        if (tsk) {
                if (cpu >= 0) {
                        toggle_bp_task_slot(tsk, cpu, enable);
                        return;
                }

                for_each_online_cpu(cpu)
                        toggle_bp_task_slot(tsk, cpu, enable);
                return;
        }

        /* Pinned counter cpu profiling */
        if (enable)
                per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
        else
                per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up every debug register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register free (or the flexible counters will
 *          never get a chance to run).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
 */
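
/*
 * Illustrative example (not from the original source): with HBP_NUM == 4
 * (e.g. on x86), suppose a cpu already has one pinned cpu-wide breakpoint,
 * its busiest task holds two pinned breakpoints, and at least one flexible
 * counter exists. Then slots == { .pinned = 3, .flexible = 1 } and
 * reserve_bp_slot() below rejects a new pinned breakpoint, because
 * 3 + !!1 == HBP_NUM would leave no register for the flexible counters.
 */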
int reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};
        int ret = 0;

        mutex_lock(&nr_bp_mutex);

        fetch_bp_busy_slots(&slots, bp->cpu);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
                ret = -ENOSPC;
                goto end;
        }

        toggle_bp_slot(bp, true);

end:
        mutex_unlock(&nr_bp_mutex);

        return ret;
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        toggle_bp_slot(bp, false);

        mutex_unlock(&nr_bp_mutex);
}

int __register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        if (!bp->attr.disabled)
                ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

        return ret;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        bp->callback = perf_bp_event;

        return __register_perf_hw_breakpoint(bp);
}

/*
 * Register a breakpoint bound to a task and a given cpu.
 * If cpu is -1, the breakpoint is active for the task on every cpu.
 * If pid is -1, the breakpoint is active for every task on the given
 * cpu.
 */
static struct perf_event *
register_user_hw_breakpoint_cpu(unsigned long addr,
                                int len,
                                int type,
                                perf_callback_t triggered,
                                pid_t pid,
                                int cpu,
                                bool active)
{
        struct perf_event_attr *attr;
        struct perf_event *bp;

        attr = kzalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return ERR_PTR(-ENOMEM);

        attr->type = PERF_TYPE_BREAKPOINT;
        attr->size = sizeof(*attr);
        attr->bp_addr = addr;
        attr->bp_len = len;
        attr->bp_type = type;
        /*
         * Such breakpoints are used by debuggers to trigger signals when
         * we hit the watched memory access. We can't miss such events,
         * so they must be pinned.
         */
        attr->pinned = 1;

        if (!active)
                attr->disabled = 1;

        bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
        kfree(attr);

        return bp;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes, etc.)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
register_user_hw_breakpoint(unsigned long addr,
                            int len,
                            int type,
                            perf_callback_t triggered,
                            struct task_struct *tsk,
                            bool active)
{
        return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
                                               tsk->pid, -1, active);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

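/*
 * Usage sketch (illustrative only, not part of the original file),
 * assuming the HW_BREAKPOINT_* constants from <linux/hw_breakpoint.h>;
 * "my_triggered" is a hypothetical perf_callback_t. Watch a 4-byte
 * location in task "tsk" for write access:
 *
 *      struct perf_event *bp;
 *
 *      bp = register_user_hw_breakpoint(addr, HW_BREAKPOINT_LEN_4,
 *                                       HW_BREAKPOINT_W, my_triggered,
 *                                       tsk, true);
 *      if (IS_ERR(bp))
 *              return PTR_ERR(bp);
 */
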
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes, etc.)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp,
                          unsigned long addr,
                          int len,
                          int type,
                          perf_callback_t triggered,
                          struct task_struct *tsk,
                          bool active)
{
        /*
         * FIXME: do it without unregistering
         * - We don't want to lose our slot
         * - If the new bp is incorrect, don't lose the older one
         */
        unregister_hw_breakpoint(bp);

        return register_user_hw_breakpoint(addr, len, type, triggered,
                                           tsk, active);
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

static struct perf_event *
register_kernel_hw_breakpoint_cpu(unsigned long addr,
                                  int len,
                                  int type,
                                  perf_callback_t triggered,
                                  int cpu,
                                  bool active)
{
        return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
                                               -1, cpu, active);
}

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes, etc.)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @active: should we activate it while registering it
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(unsigned long addr,
                            int len,
                            int type,
                            perf_callback_t triggered,
                            bool active)
{
        struct perf_event **cpu_events, **pevent, *bp;
        long err;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return ERR_PTR(-ENOMEM);

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
                                        triggered, cpu, active);

                *pevent = bp;

                if (IS_ERR(bp) || !bp) {
                        err = PTR_ERR(bp);
                        goto fail;
                }
        }

        return cpu_events;

fail:
        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                if (IS_ERR(*pevent) || !*pevent)
                        break;
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
        /* return the error if any */
        return ERR_PTR(err);
}

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
        int cpu;
        struct perf_event **pevent;

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
}
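
/*
 * Usage sketch (illustrative only, not part of the original file),
 * assuming the HW_BREAKPOINT_* constants from <linux/hw_breakpoint.h>;
 * "my_triggered" is a hypothetical perf_callback_t. Watch a kernel
 * address for write access on every cpu, then tear it down:
 *
 *      struct perf_event **bps;
 *
 *      bps = register_wide_hw_breakpoint(addr, HW_BREAKPOINT_LEN_4,
 *                                        HW_BREAKPOINT_W, my_triggered,
 *                                        true);
 *      if (IS_ERR(bps))
 *              return PTR_ERR(bps);
 *      ...
 *      unregister_wide_hw_breakpoint(bps);
 */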

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
        return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);
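/*
 * The breakpoint pseudo-pmu: perf enables/disables a breakpoint counter
 * by installing/uninstalling it in the cpu debug registers through the
 * arch backend.
 */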
struct pmu perf_ops_bp = {
        .enable = arch_install_hw_breakpoint,
        .disable = arch_uninstall_hw_breakpoint,
        .read = hw_breakpoint_pmu_read,
        .unthrottle = hw_breakpoint_pmu_unthrottle
};