#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#define USE_CPUHP_STATE CPUHP_AP_PERF_ARM_STARTING
#define USE_CPUHP_STR "AP_PERF_ARM_STARTING"
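/*
 * Map a PERF_TYPE_HW_CACHE config (cache type, op and result packed into
 * the low three bytes of attr.config) onto the PMU-specific event number
 * via the per-PMU cache_map table.
 */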
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
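/*
 * Program the counter so that it overflows after sample_period events.
 * The period is clamped to half of max_period so that an in-flight
 * overflow plus interrupt latency cannot wrap past the new start value.
 */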
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
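/*
 * Read the hardware counter and fold the delta since the last read into
 * event->count and period_left. The cmpxchg loop retries if an interrupt
 * updates prev_count while we are reading.
 */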
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	perf_pmu_disable(event->pmu);

	/* If we don't have space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
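/*
 * Group validation: check that every hardware event in a group could be
 * given a counter at the same time, by allocating indices against a
 * dummy pmu_hw_events whose used_mask starts out empty.
 */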
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;

	return 0;
}
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};
static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= armpmu->attr_groups,
		.events_across_hotplug = 1,
	};
	armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;
}
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq > 0 && irq_is_percpu(irq)) {
		on_each_cpu_mask(&cpu_pmu->supported_cpus,
				 cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
		cpu_pmu->percpu_irq = -1;
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq > 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
}
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq > 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			return err;
		}

		on_each_cpu_mask(&cpu_pmu->supported_cpus,
				 cpu_pmu_enable_percpu_irq, &irq, 1);
		cpu_pmu->percpu_irq = irq;
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, cpu);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
				       irq);
				return err;
			}

			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}
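/*
 * Bundle of arguments shared between the CPU PM notifier and the hotplug
 * callbacks; both funnel into cpu_pm_pmu_common() to save or restore the
 * counters around low-power entry and exit.
 */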
struct cpu_pm_pmu_args {
	struct arm_pmu *armpmu;
	unsigned long cmd;
	int cpu;
	int ret;
};
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}
static void cpu_pm_pmu_common(void *info)
{
	struct cpu_pm_pmu_args *data = info;
	struct arm_pmu *armpmu = data->armpmu;
	unsigned long cmd = data->cmd;
	int cpu = data->cpu;
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(cpu, &armpmu->supported_cpus)) {
		data->ret = NOTIFY_DONE;
		return;
	}

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled) {
		data->ret = NOTIFY_OK;
		return;
	}

	data->ret = NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		cpu_pm_pmu_setup(armpmu, cmd);
		armpmu->start(armpmu);
		break;
	default:
		data->ret = NOTIFY_DONE;
		break;
	}

	return;
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct cpu_pm_pmu_args data = {
		.armpmu	= container_of(b, struct arm_pmu, cpu_pm_nb),
		.cmd	= cmd,
		.cpu	= smp_processor_id(),
	};

	cpu_pm_pmu_common(&data);
	return data.ret;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}

#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
static void cpu_pm_pmu_common(void *info) { }
#endif
/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);

	struct cpu_pm_pmu_args data = {
		.armpmu	= pmu,
		.cpu	= (int)cpu,
	};

	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	data.cmd = CPU_PM_EXIT;
	cpu_pm_pmu_common(&data);
	if (data.ret == NOTIFY_DONE)
		return 0;

	if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF &&
	    data.armpmu->plat_device) {
		int irq = data.armpmu->percpu_irq;

		if (irq > 0 && irq_is_percpu(irq))
			cpu_pmu_enable_percpu_irq(&irq);
	}

	return 0;
}

static int arm_perf_stopping_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);

	struct cpu_pm_pmu_args data = {
		.armpmu	= pmu,
		.cpu	= (int)cpu,
	};

	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	data.cmd = CPU_PM_ENTER;
	cpu_pm_pmu_common(&data);
	/* Disarm the PMU IRQ before disappearing. */
	if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING &&
	    data.armpmu->plat_device) {
		int irq = data.armpmu->percpu_irq;

		if (irq > 0 && irq_is_percpu(irq))
			cpu_pmu_disable_percpu_irq(&irq);
	}

	return 0;
}
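/*
 * Allocate the per-CPU event state, register the hotplug and CPU PM
 * callbacks, and hook up the IRQ request/free helpers for this PMU.
 */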
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;
	int cpu;
	struct pmu_hw_events __percpu *cpu_hw_events;

	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
	if (!cpu_hw_events)
		return -ENOMEM;

	err = cpuhp_state_add_instance_nocalls(USE_CPUHP_STATE,
					       &cpu_pmu->node);
	if (err)
		goto out_free;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unreg_perf_starting;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = cpu_pmu;
	}

	cpu_pmu->hw_events = cpu_hw_events;
	cpu_pmu->request_irq = cpu_pmu_request_irq;
	cpu_pmu->free_irq = cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
				 cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	/*
	 * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
	 * big.LITTLE). This is not an uncore PMU, and we have taken ctx
	 * sharing into account (e.g. with our pmu::filter_match callback and
	 * pmu::event_init group validation).
	 */
	cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;

	return 0;

out_unreg_perf_starting:
	cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
					    &cpu_pmu->node);
out_free:
	free_percpu(cpu_hw_events);
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
					    &cpu_pmu->node);
	free_percpu(cpu_pmu->hw_events);
}
/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu,
			     const struct pmu_probe_info *info)
{
	int cpu = get_cpu();
	unsigned int cpuid = read_cpuid_id();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}

	put_cpu();
	return ret;
}
| 1001 | static int of_pmu_irq_cfg(struct arm_pmu *pmu) |
| 1002 | { |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1003 | int *irqs, i = 0; |
| 1004 | bool using_spi = false; |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1005 | struct platform_device *pdev = pmu->plat_device; |
| 1006 | |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1007 | irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); |
| 1008 | if (!irqs) |
| 1009 | return -ENOMEM; |
| 1010 | |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1011 | do { |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1012 | struct device_node *dn; |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1013 | int cpu, irq; |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1014 | |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1015 | /* See if we have an affinity entry */ |
| 1016 | dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i); |
| 1017 | if (!dn) |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1018 | break; |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1019 | |
| 1020 | /* Check the IRQ type and prohibit a mix of PPIs and SPIs */ |
| 1021 | irq = platform_get_irq(pdev, i); |
Marc Zyngier | 282b879 | 2016-09-06 15:34:44 +0100 | [diff] [blame] | 1022 | if (irq > 0) { |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1023 | bool spi = !irq_is_percpu(irq); |
| 1024 | |
| 1025 | if (i > 0 && spi != using_spi) { |
| 1026 | pr_err("PPI/SPI IRQ type mismatch for %s!\n", |
| 1027 | dn->name); |
Stefan Wahren | 7532468 | 2016-08-27 16:19:49 +0000 | [diff] [blame] | 1028 | of_node_put(dn); |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1029 | kfree(irqs); |
| 1030 | return -EINVAL; |
| 1031 | } |
| 1032 | |
| 1033 | using_spi = spi; |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1034 | } |
| 1035 | |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1036 | /* Now look up the logical CPU number */ |
Will Deacon | fb65988 | 2015-10-12 14:48:39 +0100 | [diff] [blame] | 1037 | for_each_possible_cpu(cpu) { |
| 1038 | struct device_node *cpu_dn; |
| 1039 | |
| 1040 | cpu_dn = of_cpu_device_node_get(cpu); |
| 1041 | of_node_put(cpu_dn); |
| 1042 | |
| 1043 | if (dn == cpu_dn) |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1044 | break; |
Will Deacon | fb65988 | 2015-10-12 14:48:39 +0100 | [diff] [blame] | 1045 | } |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1046 | |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1047 | if (cpu >= nr_cpu_ids) { |
| 1048 | pr_warn("Failed to find logical CPU for %s\n", |
| 1049 | dn->name); |
Stephen Boyd | 8e0c34b | 2015-07-07 18:17:05 +0100 | [diff] [blame] | 1050 | of_node_put(dn); |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1051 | cpumask_setall(&pmu->supported_cpus); |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1052 | break; |
| 1053 | } |
Stephen Boyd | 8e0c34b | 2015-07-07 18:17:05 +0100 | [diff] [blame] | 1054 | of_node_put(dn); |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1055 | |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1056 | /* For SPIs, we need to track the affinity per IRQ */ |
| 1057 | if (using_spi) { |
Julien Grall | 121323a | 2016-05-31 12:41:21 +0100 | [diff] [blame] | 1058 | if (i >= pdev->num_resources) |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1059 | break; |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1060 | |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1061 | irqs[i] = cpu; |
| 1062 | } |
| 1063 | |
| 1064 | /* Keep track of the CPUs containing this PMU type */ |
| 1065 | cpumask_set_cpu(cpu, &pmu->supported_cpus); |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1066 | i++; |
| 1067 | } while (1); |
| 1068 | |
Marc Zyngier | 19a469a | 2016-07-08 15:56:04 +0100 | [diff] [blame] | 1069 | 	/* Nothing parsed from "interrupt-affinity": derive the mask from the IRQ itself */
| 1070 | if (cpumask_weight(&pmu->supported_cpus) == 0) { |
Marc Zyngier | 7f1d642 | 2016-07-19 15:39:02 +0100 | [diff] [blame] | 1071 | int irq = platform_get_irq(pdev, 0); |
Marc Zyngier | 19a469a | 2016-07-08 15:56:04 +0100 | [diff] [blame] | 1072 | |
Marc Zyngier | 282b879 | 2016-09-06 15:34:44 +0100 | [diff] [blame] | 1073 | if (irq > 0 && irq_is_percpu(irq)) { |
Marc Zyngier | 7f1d642 | 2016-07-19 15:39:02 +0100 | [diff] [blame] | 1074 | /* If using PPIs, check the affinity of the partition */ |
| 1075 | int ret; |
| 1076 | |
Marc Zyngier | 19a469a | 2016-07-08 15:56:04 +0100 | [diff] [blame] | 1077 | ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); |
| 1078 | if (ret) { |
| 1079 | kfree(irqs); |
| 1080 | return ret; |
| 1081 | } |
| 1082 | } else { |
| 1083 | /* Otherwise default to all CPUs */ |
| 1084 | cpumask_setall(&pmu->supported_cpus); |
| 1085 | } |
| 1086 | } |
Will Deacon | b6c084d | 2015-06-29 13:59:01 +0100 | [diff] [blame] | 1087 | |
| 1088 | /* If we matched up the IRQ affinities, use them to route the SPIs */ |
| 1089 | if (using_spi && i == pdev->num_resources) |
| 1090 | pmu->irq_affinity = irqs; |
| 1091 | else |
| 1092 | kfree(irqs); |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1093 | |
| 1094 | return 0; |
| 1095 | } |
| 1096 | |
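/*
 * Common probe path for CPU PMU drivers: allocate an arm_pmu, initialise it
 * either via the DT match table or, failing that, via the MIDR-based
 * probe_table, set up the hotplug/CPU PM hooks, and register with the perf
 * core.
 *
 * Minimal caller sketch (the table names are illustrative, not defined here):
 *
 *	static int my_pmu_device_probe(struct platform_device *pdev)
 *	{
 *		return arm_pmu_device_probe(pdev, my_pmu_of_device_ids,
 *					    my_pmu_probe_table);
 *	}
 *
 * i.e. a PMU platform driver's ->probe() simply forwards its device and the
 * two tables describing the PMU variants it supports.
 */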
| 1097 | int arm_pmu_device_probe(struct platform_device *pdev, |
| 1098 | const struct of_device_id *of_table, |
| 1099 | const struct pmu_probe_info *probe_table) |
| 1100 | { |
| 1101 | const struct of_device_id *of_id; |
| 1102 | 	int (*init_fn)(struct arm_pmu *);
| 1103 | struct device_node *node = pdev->dev.of_node; |
| 1104 | struct arm_pmu *pmu; |
| 1105 | int ret = -ENODEV; |
| 1106 | |
| 1107 | pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL); |
| 1108 | if (!pmu) { |
| 1109 | pr_info("failed to allocate PMU device!\n"); |
| 1110 | return -ENOMEM; |
| 1111 | } |
| 1112 | |
Mark Rutland | b916b78 | 2015-10-28 12:32:17 +0000 | [diff] [blame] | 1113 | armpmu_init(pmu); |
| 1114 | |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1115 | pmu->plat_device = pdev; |
| 1116 | |
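	/* Prefer DT-based probing; otherwise fall back to matching the running CPU's MIDR. */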
| 1117 | if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { |
| 1118 | init_fn = of_id->data; |
| 1119 | |
Martin Fuzzey | 8d1a0ae | 2016-01-13 23:36:26 -0500 | [diff] [blame] | 1120 | pmu->secure_access = of_property_read_bool(pdev->dev.of_node, |
| 1121 | "secure-reg-access"); |
| 1122 | |
| 1123 | /* arm64 systems boot only as non-secure */ |
| 1124 | if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) { |
| 1125 | pr_warn("ignoring \"secure-reg-access\" property for arm64\n"); |
| 1126 | pmu->secure_access = false; |
| 1127 | } |
| 1128 | |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1129 | ret = of_pmu_irq_cfg(pmu); |
| 1130 | if (!ret) |
| 1131 | ret = init_fn(pmu); |
Mark Salter | dbee3a7 | 2016-09-14 17:32:29 -0500 | [diff] [blame] | 1132 | } else if (probe_table) { |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1133 | cpumask_setall(&pmu->supported_cpus); |
Mark Salter | f7a6c14 | 2016-06-07 11:32:21 -0500 | [diff] [blame] | 1134 | ret = probe_current_pmu(pmu, probe_table); |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1135 | } |
| 1136 | |
| 1137 | if (ret) { |
Will Deacon | 357b565 | 2016-03-21 11:07:15 +0000 | [diff] [blame] | 1138 | pr_info("%s: failed to probe PMU!\n", of_node_full_name(node)); |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1139 | goto out_free; |
| 1140 | } |
| 1141 | |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1143 | ret = cpu_pmu_init(pmu); |
| 1144 | if (ret) |
| 1145 | goto out_free; |
| 1146 | |
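	/* Hand the PMU to the perf core; type -1 asks for a dynamically allocated PMU type. */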
Mark Rutland | b916b78 | 2015-10-28 12:32:17 +0000 | [diff] [blame] | 1147 | ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1148 | if (ret) |
| 1149 | goto out_destroy; |
| 1150 | |
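	/* Remember the first PMU we register; legacy OProfile-style users query it. */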
Julien Grall | 0f254c7 | 2016-05-31 12:41:22 +0100 | [diff] [blame] | 1151 | if (!__oprofile_cpu_pmu) |
| 1152 | __oprofile_cpu_pmu = pmu; |
| 1153 | |
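	/* Initial hotplug bookkeeping: the PMU starts out off and no per-CPU IRQ has been claimed yet. */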
Patrick Fay | b02d764 | 2017-04-03 19:20:57 -0700 | [diff] [blame] | 1154 | pmu->pmu_state = ARM_PMU_STATE_OFF; |
| 1155 | pmu->percpu_irq = -1; |
| 1156 | |
Mark Rutland | b916b78 | 2015-10-28 12:32:17 +0000 | [diff] [blame] | 1157 | pr_info("enabled with %s PMU driver, %d counters available\n", |
| 1158 | pmu->name, pmu->num_events); |
| 1159 | |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1160 | return 0; |
| 1161 | |
| 1162 | out_destroy: |
| 1163 | cpu_pmu_destroy(pmu); |
| 1164 | out_free: |
Will Deacon | 357b565 | 2016-03-21 11:07:15 +0000 | [diff] [blame] | 1165 | 	pr_info("%s: failed to register PMU device!\n",
| 1166 | of_node_full_name(node)); |
Julien Grall | 5988a36 | 2016-05-31 12:41:23 +0100 | [diff] [blame] | 1167 | kfree(pmu->irq_affinity); |
Mark Rutland | 74cf0bc | 2015-05-26 17:23:39 +0100 | [diff] [blame] | 1168 | kfree(pmu); |
| 1169 | return ret; |
| 1170 | } |
Sebastian Andrzej Siewior | 37b502f | 2016-07-20 09:51:11 +0200 | [diff] [blame] | 1171 | |
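/*
 * Register the multi-instance hotplug state used above once at boot so that
 * it exists before any PMU device probes; each PMU then adds its own
 * instance from cpu_pmu_init(), gaining the STARTING/STOPPING callbacks.
 */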
| 1172 | static int arm_pmu_hp_init(void) |
| 1173 | { |
| 1174 | int ret; |
| 1175 | |
Patrick Fay | b02d764 | 2017-04-03 19:20:57 -0700 | [diff] [blame] | 1176 | ret = cpuhp_setup_state_multi(USE_CPUHP_STATE, |
| 1177 | USE_CPUHP_STR, |
| 1178 | arm_perf_starting_cpu, |
| 1179 | arm_perf_stopping_cpu); |
Sebastian Andrzej Siewior | 37b502f | 2016-07-20 09:51:11 +0200 | [diff] [blame] | 1180 | if (ret) |
Patrick Fay | b02d764 | 2017-04-03 19:20:57 -0700 | [diff] [blame] | 1181 | 		pr_err("failed to register ARM PMU CPU hotplug state: %d\n",
Sebastian Andrzej Siewior | 37b502f | 2016-07-20 09:51:11 +0200 | [diff] [blame] | 1182 | ret); |
| 1183 | return ret; |
| 1184 | } |
| 1185 | subsys_initcall(arm_pmu_hp_init); |