/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/apic.h>

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

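/*
 * Config bits a user may set for each PMU; a short summary, assuming
 * the Fam10h register layout: fetch sampling additionally allows
 * randomized fetch tagging (IBS_FETCH_RAND_EN), while op sampling only
 * exposes the maximum count here (IBS_OP_CNT_CTL is OR-ed into the op
 * mask at init time when the CPU advertises IBS_CAPS_OPCNT).
 */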
#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT

enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,

	IBS_MAX_STATES,
};
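/*
 * Per-cpu state machine (a sketch of the intended flow, not spelled
 * out in the original): IBS_ENABLED is claimed in perf_ibs_add() and
 * serializes the single hardware sampler per PMU, IBS_STARTED tracks
 * whether the MSR enable bit is live, and IBS_STOPPING lets the NMI
 * handler claim one final in-flight sample after perf_ibs_stop().
 */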

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;		/* control MSR (IbsFetchCtl/IbsOpCtl) */
	u64				config_mask;	/* user-settable config bits */
	u64				cnt_mask;	/* max-count field in the config */
	u64				enable_mask;
	u64				valid_mask;	/* sample-valid bit */
	u64				max_period;
	unsigned long			offset_mask[1];	/* MSR offsets to read for a sample */
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;
	u64				(*get_count)(u64 config);
};

struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};

static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *count)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < min))
		left = min;

	if (left > max)
		left = max;

	*count = (u64)left;

	return overflow;
}
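/*
 * Example (illustrative numbers, not from the original): with a
 * sample_period of 0x20000 and period_left at -0x30 after an NMI, the
 * second branch above yields left = 0x1ffd0 and overflow = 1; the
 * clamps then keep the programmed count within [min, max], so e.g. a
 * max of IBS_FETCH_MAX_CNT << 4 = 0xffff0 is never exceeded.
 */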

static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
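/*
 * A worked example of the shift trick above (hypothetical values):
 * with width = 20 the shift is 44, so a counter that wrapped from
 * prev = 0xffff0 to new = 0x00010 gives
 *
 *	delta = ((0x00010ULL << 44) - (0xffff0ULL << 44)) >> 44 = 0x20,
 *
 * i.e. the difference modulo 2^20, independent of whatever garbage
 * the hardware left in the upper bits.
 */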

static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}

static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (!perf_ibs)
		return -ENOENT;

	config = event->attr.config;
	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * The lower 4 bits cannot be set in the IBS
			 * max count, but we allow them when the
			 * sample period will be adjusted anyway to
			 * set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
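/*
 * Example of the raw-config path above (hypothetical attr values): a
 * user passing an attr.config with a max count field of 0x1000 and no
 * sample_period gets sample_period = 0x1000 << 4 = 0x10000 events,
 * since the hardware max count field is specified in units of 16.
 */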

static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int ret;

	/* ignore lower 4 bits in min count: */
	ret = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return ret;
}

static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
	return (config & IBS_OP_CUR_CNT) >> 32;
}
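/*
 * On the shifts above (assuming the Fam10h register layout): the
 * fetch counter lives in IbsFetchCtl[31:16] in units of 16 events, so
 * ">> 12" extracts the field and rescales it (>> 16, << 4) in one go;
 * the op counter in IbsOpCtl[51:32] counts single ops and only needs
 * the plain ">> 32".
 */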

static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 config)
{
	u64 count = perf_ibs->get_count(config);

	while (!perf_event_try_update(event, count, 20)) {
		rdmsrl(event->hw.config_base, config);
		count = perf_ibs->get_count(config);
	}
}

/* Note: The enable mask must be encoded in the config argument. */
static inline void perf_ibs_enable_event(struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config);
}

/*
 * We cannot restore the IBS PMU state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags
 * in perf_ibs_start()/perf_ibs_stop() and instead always do both.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

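	/*
	 * The hardware max count field holds the period divided by 16
	 * (the low 4 bits are implied zero), hence the ">> 4" below;
	 * e.g. a period of 0x10000 is programmed as a max count of
	 * 0x1000 (illustrative value, not from the original).
	 */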
	perf_ibs_set_period(perf_ibs, hwc, &config);
	config = (config >> 4) | perf_ibs->enable_mask;
	set_bit(IBS_STARTED, pcpu->state);
	perf_ibs_enable_event(hwc, config);

	perf_event_update_userpage(event);
}

static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 val;
	int stopping;

	stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, val);

	if (stopping) {
		set_bit(IBS_STOPPING, pcpu->state);
		val &= ~perf_ibs->enable_mask;
		wrmsrl(hwc->config_base, val);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_ibs_event_update(perf_ibs, event, val);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }

static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,

	.get_count		= get_ibs_fetch_count,
};

static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,

	.get_count		= get_ibs_op_count,
};

static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, overflow, reenable;
	unsigned int msr;
	u64 *buf, config;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
		/* Catch spurious interrupts after stopping IBS: */
		if (!test_and_clear_bit(IBS_STOPPING, pcpu->state))
			return 0;
		rdmsrl(perf_ibs->msr, *ibs_data.regs);
		return (*ibs_data.regs & perf_ibs->valid_mask) ? 1 : 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		return 0;

	/*
	 * Emulate IbsOpCurCnt in MSRC001_1033 (IbsOpCtl), which is not
	 * supported on all CPUs. Since this code only runs because the
	 * counter overflowed and triggered an interrupt, we can set
	 * the current count to the max count.
	 */
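	/*
	 * Bit gymnastics of the emulation below (assuming the Fam10h
	 * layout): the max count sits in IbsOpCtl[15:0] in units of 16
	 * ops, the current count in IbsOpCtl[51:32] in single ops, so
	 * shifting the max count field left by 36 lands it at bits
	 * [51:36], i.e. CurCnt = MaxCnt * 16.
	 */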
	config = ibs_data.regs[0];
	if (perf_ibs == &perf_ibs_op && !(ibs_caps & IBS_CAPS_RDWROPCNT)) {
		config &= ~IBS_OP_CUR_CNT;
		config |= (config & IBS_OP_MAX_CNT) << 36;
	}

	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		ibs_data.caps = ibs_caps;
		size = 1;
		offset = 1;
		do {
			rdmsrl(msr + offset, *buf++);
			size++;
			offset = find_next_bit(perf_ibs->offset_mask,
					       perf_ibs->offset_max,
					       offset + 1);
		} while (offset < perf_ibs->offset_max);
		raw.size = sizeof(u32) + sizeof(u64) * size;
		raw.data = ibs_data.data;
		data.raw = &raw;
	}

	regs = *iregs; /* XXX: update ip from ibs sample */

	overflow = perf_ibs_set_period(perf_ibs, hwc, &config);
	reenable = !(overflow && perf_event_overflow(event, &data, &regs));
	config = (config >> 4) | (reenable ? perf_ibs->enable_mask : 0);
	perf_ibs_enable_event(hwc, config);

	perf_event_update_userpage(event);

	return 1;
}

static int __kprobes
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}

static __init int perf_event_ibs_init(void)
{
	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
	if (ibs_caps & IBS_CAPS_OPCNT)
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif

/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}

u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);

static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
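/*
 * A note on the two helpers above (my reading of the eilvt API, not
 * stated in the original): setup_APIC_eilvt() programs an extended
 * LVT entry and returns 0 on success, so get_eilvt() reserves an
 * offset by installing a masked NMI entry and put_eilvt() releases it
 * by resetting the entry, each reporting success as nonzero.
 */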

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}

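/*
 * Propagate the chosen LVT offset to every node: the loop below walks
 * the per-node northbridge PCI devices and writes the offset, plus the
 * valid bit, into each node's IBSCTL register, reading it back to
 * verify that the northbridge accepted the value.
 */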
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x\n", value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * set up the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset, which then updates
 * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
 * uses the new offset.
 */
static int force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		printk(KERN_DEBUG "No EILVT entry available\n");
		return -EBUSY;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid()) {
		ret = -EFAULT;
		goto out;
	}

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return 0;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return ret;
}

static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}

static int __cpuinit
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		setup_APIC_ibs(NULL);
		break;
	case CPU_DYING:
		clear_APIC_ibs(NULL);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret = -EINVAL;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings and try to use those.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		goto out;

	get_online_cpus();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	perf_cpu_notifier(perf_ibs_cpu_notifier);
	smp_call_function(setup_APIC_ibs, NULL, 1);
	put_online_cpus();

	ret = perf_event_ibs_init();
out:
	if (ret)
		pr_err("Failed to setup IBS, %d\n", ret);
	return ret;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);