/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>


/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
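
/*
 * Illustrative example (not part of the original file): with
 * tsk_pinned == { 3, 1, 0, 0 }, three tasks pin exactly one breakpoint
 * and one task pins exactly two, so max_task_bp_pinned() scans down
 * from the top slot, finds tsk_pinned[1] > 0 and reports a maximum
 * of 2.
 */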

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.bp_target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.bp_target == tsk &&
		    find_slot_idx(iter) == type &&
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (bp->cpu >= 0) or in all of them (bp->cpu < 0).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	for_each_cpu(cpu, cpumask) {
		unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu);

		if (!bp->hw.bp_target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
				enum bp_type_idx type, int weight)
{
	/* tsk_pinned[n-1] is the number of tasks having n>0 breakpoints */
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	int old_idx, new_idx;

	old_idx = task_bp_pinned(cpu, bp, type) - 1;
	new_idx = old_idx + weight;

	if (old_idx >= 0)
		tsk_pinned[old_idx]--;
	if (new_idx >= 0)
		tsk_pinned[new_idx]++;
}
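
/*
 * Worked example (illustrative, not from the original source): a task
 * going from 2 to 3 pinned breakpoints on this cpu with weight == 1
 * yields old_idx == 1 and new_idx == 2, so tsk_pinned[1] is decremented
 * (one fewer task with exactly 2 breakpoints) and tsk_pinned[2] is
 * incremented (one more task with exactly 3 breakpoints).
 */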

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!bp->hw.bp_target) {
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
		return;
	}

	/* Pinned counter task profiling */
	for_each_cpu(cpu, cpumask)
		toggle_bp_task_slot(bp, cpu, type, weight);

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
	else
		list_del(&bp->hw.bp_list);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 * == Non-pinned counter == (Considered as pinned for now)
 *
 * - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *   -> If there are already non-pinned counters in this cpu, it means
 *      there is already a free slot for them.
 *      Otherwise, we check that the maximum number of per task
 *      breakpoints (for this cpu) plus the number of per cpu breakpoints
 *      (for this cpu) doesn't cover every register.
 *
 * - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *   -> This is roughly the same, except we check the number of per cpu
 *      bp for every cpu and we keep the max one. Same for the per task
 *      breakpoints.
 *
 *
 * == Pinned counter ==
 *
 * - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *   -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *      at least one register (or they will never be fed).
 *
 * - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
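
/*
 * Worked numeric example (illustrative, assuming 4 slots of this type):
 * if this cpu already has 2 pinned cpu-wide breakpoints and the busiest
 * task pins 1 more, slots.pinned == 3, and a new pinned breakpoint of
 * weight 1 brings it to 4, which still fits.  If a flexible counter is
 * also present on the cpu, the check below becomes 4 + 1 > 4 and the
 * reservation fails with -ENOSPC.
 */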
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the @triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
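
/*
 * Example usage (a sketch, not part of the original file; the watched
 * address, the handler and the traced task are hypothetical):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)watch_addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_trigger_fn, NULL, child);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */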

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		__perf_event_disable(bp);
	else
		perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
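
/*
 * Example (a sketch, not part of the original file): move an existing
 * breakpoint to a hypothetical new_addr, keeping its type and length:
 *
 *	struct perf_event_attr attr = bp->attr;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */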

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the @triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	put_online_cpus();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
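
/*
 * Example usage (a sketch along the lines of
 * samples/hw_breakpoint/data_breakpoint.c; the watched symbol and the
 * handler name are illustrative):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 *	if (IS_ERR((void __force *)wp))
 *		return PTR_ERR((void __force *)wp);
 */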

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static int hw_breakpoint_event_idx(struct perf_event *bp)
{
	return 0;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,

	.event_idx	= hw_breakpoint_event_idx,
};

int __init init_hw_breakpoint(void)
{
	unsigned int **task_bp_pinned;
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}