Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 1 | /* |
| 2 | * This program is free software; you can redistribute it and/or modify |
| 3 | * it under the terms of the GNU General Public License version 2 as |
| 4 | * published by the Free Software Foundation. |
| 5 | * |
| 6 | * This program is distributed in the hope that it will be useful, |
| 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 9 | * GNU General Public License for more details. |
| 10 | * |
| 11 | * You should have received a copy of the GNU General Public License |
| 12 | * along with this program; if not, write to the Free Software |
| 13 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| 14 | * |
| 15 | * Copyright (C) 2012 ARM Limited |
| 16 | * |
| 17 | * Author: Will Deacon <will.deacon@arm.com> |
| 18 | */ |
| 19 | #define pr_fmt(fmt) "CPU PMU: " fmt |
| 20 | |
| 21 | #include <linux/bitmap.h> |
| 22 | #include <linux/export.h> |
| 23 | #include <linux/kernel.h> |
| 24 | #include <linux/of.h> |
| 25 | #include <linux/platform_device.h> |
Sudeep KarkadaNagesha | 513c99c | 2012-07-31 10:11:23 +0100 | [diff] [blame] | 26 | #include <linux/slab.h> |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 27 | #include <linux/spinlock.h> |
Stephen Boyd | bbd6455 | 2014-02-07 21:01:19 +0000 | [diff] [blame] | 28 | #include <linux/irq.h> |
| 29 | #include <linux/irqdesc.h> |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 30 | |
| 31 | #include <asm/cputype.h> |
| 32 | #include <asm/irq_regs.h> |
| 33 | #include <asm/pmu.h> |
| 34 | |
/*
 * Set at runtime when we know what CPU type we are. Only one CPU PMU may
 * be registered at a time (see cpu_pmu_device_probe).
 */
static struct arm_pmu *cpu_pmu;
| 37 | |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 38 | /* |
| 39 | * Despite the names, these two functions are CPU-specific and are used |
| 40 | * by the OProfile/perf code. |
| 41 | */ |
| 42 | const char *perf_pmu_name(void) |
| 43 | { |
| 44 | if (!cpu_pmu) |
| 45 | return NULL; |
| 46 | |
Will Deacon | 0305230 | 2012-09-21 14:23:47 +0100 | [diff] [blame] | 47 | return cpu_pmu->name; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 48 | } |
| 49 | EXPORT_SYMBOL_GPL(perf_pmu_name); |
| 50 | |
| 51 | int perf_num_counters(void) |
| 52 | { |
| 53 | int max_events = 0; |
| 54 | |
| 55 | if (cpu_pmu != NULL) |
| 56 | max_events = cpu_pmu->num_events; |
| 57 | |
| 58 | return max_events; |
| 59 | } |
| 60 | EXPORT_SYMBOL_GPL(perf_num_counters); |
| 61 | |
| 62 | /* Include the PMU-specific implementations. */ |
| 63 | #include "perf_event_xscale.c" |
| 64 | #include "perf_event_v6.c" |
| 65 | #include "perf_event_v7.c" |
| 66 | |
Stephen Boyd | bbd6455 | 2014-02-07 21:01:19 +0000 | [diff] [blame] | 67 | static void cpu_pmu_enable_percpu_irq(void *data) |
| 68 | { |
Stephen Boyd | 505013b | 2014-09-11 23:25:30 +0100 | [diff] [blame] | 69 | int irq = *(int *)data; |
Stephen Boyd | bbd6455 | 2014-02-07 21:01:19 +0000 | [diff] [blame] | 70 | |
| 71 | enable_percpu_irq(irq, IRQ_TYPE_NONE); |
Stephen Boyd | bbd6455 | 2014-02-07 21:01:19 +0000 | [diff] [blame] | 72 | } |
| 73 | |
/* SMP cross-call helper: disable the per-cpu PMU IRQ on the calling CPU. */
static void cpu_pmu_disable_percpu_irq(void *data)
{
	disable_percpu_irq(*(int *)data);
}
| 80 | |
/*
 * Release the PMU interrupts acquired by cpu_pmu_request_irq().
 *
 * If the PMU uses a single per-cpu interrupt (PPI), it is disabled on
 * every CPU via an SMP cross-call and then freed once. Otherwise each
 * per-CPU SPI recorded in cpu_pmu->active_irqs is freed individually.
 */
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	/* One IRQ resource per CPU at most. */
	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		/* Single PPI shared by all CPUs: disable everywhere, free once. */
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			/* Optional DT-provided mapping of IRQ index to CPU. */
			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			/* Only free IRQs that were actually requested. */
			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
}
| 108 | |
/*
 * Request the PMU interrupts described by the platform device.
 *
 * Two wirings are supported: a single per-cpu interrupt (PPI) shared by
 * all CPUs, or one SPI per CPU. Successfully requested SPIs are recorded
 * in cpu_pmu->active_irqs so cpu_pmu_free_irq() can release them.
 *
 * Returns 0 on success (including the no-IRQ case, where sampling is
 * simply unavailable), or a negative errno if a request fails.
 */
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		/* Not fatal: counting still works, only sampling is lost. */
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		/* Single PPI: request once, then enable on every CPU. */
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			err = 0;
			/* Missing IRQ resources are skipped, not fatal. */
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			/* Optional DT-provided mapping of IRQ index to CPU. */
			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, cpu);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
				       irq);
				return err;
			}

			/* Remember this IRQ so cpu_pmu_free_irq() can release it. */
			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}
| 172 | |
Mark Rutland | af66abf | 2014-10-23 15:23:35 +0100 | [diff] [blame] | 173 | /* |
| 174 | * PMU hardware loses all context when a CPU goes offline. |
| 175 | * When a CPU is hotplugged back in, since some hardware registers are |
| 176 | * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading |
| 177 | * junk values out of them. |
| 178 | */ |
| 179 | static int cpu_pmu_notify(struct notifier_block *b, unsigned long action, |
| 180 | void *hcpu) |
| 181 | { |
| 182 | struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb); |
| 183 | |
| 184 | if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) |
| 185 | return NOTIFY_DONE; |
| 186 | |
| 187 | if (pmu->reset) |
| 188 | pmu->reset(pmu); |
| 189 | else |
| 190 | return NOTIFY_DONE; |
| 191 | |
| 192 | return NOTIFY_OK; |
| 193 | } |
| 194 | |
Mark Rutland | abdf655 | 2014-10-21 14:11:23 +0100 | [diff] [blame] | 195 | static int cpu_pmu_init(struct arm_pmu *cpu_pmu) |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 196 | { |
Mark Rutland | af66abf | 2014-10-23 15:23:35 +0100 | [diff] [blame] | 197 | int err; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 198 | int cpu; |
Mark Rutland | abdf655 | 2014-10-21 14:11:23 +0100 | [diff] [blame] | 199 | struct pmu_hw_events __percpu *cpu_hw_events; |
| 200 | |
| 201 | cpu_hw_events = alloc_percpu(struct pmu_hw_events); |
| 202 | if (!cpu_hw_events) |
| 203 | return -ENOMEM; |
| 204 | |
Mark Rutland | af66abf | 2014-10-23 15:23:35 +0100 | [diff] [blame] | 205 | cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify; |
| 206 | err = register_cpu_notifier(&cpu_pmu->hotplug_nb); |
| 207 | if (err) |
| 208 | goto out_hw_events; |
| 209 | |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 210 | for_each_possible_cpu(cpu) { |
Mark Rutland | abdf655 | 2014-10-21 14:11:23 +0100 | [diff] [blame] | 211 | struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu); |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 212 | raw_spin_lock_init(&events->pmu_lock); |
Mark Rutland | 5ebd920 | 2014-05-13 19:46:10 +0100 | [diff] [blame] | 213 | events->percpu_pmu = cpu_pmu; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 214 | } |
Sudeep KarkadaNagesha | 051f1b1 | 2012-07-31 10:34:25 +0100 | [diff] [blame] | 215 | |
Mark Rutland | abdf655 | 2014-10-21 14:11:23 +0100 | [diff] [blame] | 216 | cpu_pmu->hw_events = cpu_hw_events; |
Sudeep KarkadaNagesha | 051f1b1 | 2012-07-31 10:34:25 +0100 | [diff] [blame] | 217 | cpu_pmu->request_irq = cpu_pmu_request_irq; |
| 218 | cpu_pmu->free_irq = cpu_pmu_free_irq; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 219 | |
| 220 | /* Ensure the PMU has sane values out of reset. */ |
Will Deacon | 1764c59 | 2013-01-14 17:27:35 +0000 | [diff] [blame] | 221 | if (cpu_pmu->reset) |
Sudeep KarkadaNagesha | ed6f2a5 | 2012-07-30 12:00:02 +0100 | [diff] [blame] | 222 | on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); |
Vince Weaver | edcb4d3 | 2014-05-16 17:15:49 -0400 | [diff] [blame] | 223 | |
| 224 | /* If no interrupts available, set the corresponding capability flag */ |
| 225 | if (!platform_get_irq(cpu_pmu->plat_device, 0)) |
| 226 | cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; |
Mark Rutland | abdf655 | 2014-10-21 14:11:23 +0100 | [diff] [blame] | 227 | |
| 228 | return 0; |
Mark Rutland | af66abf | 2014-10-23 15:23:35 +0100 | [diff] [blame] | 229 | |
| 230 | out_hw_events: |
| 231 | free_percpu(cpu_hw_events); |
| 232 | return err; |
Mark Rutland | abdf655 | 2014-10-21 14:11:23 +0100 | [diff] [blame] | 233 | } |
| 234 | |
/*
 * Undo cpu_pmu_init(): unregister the hotplug notifier first (so no
 * callback can touch the per-cpu state), then free the percpu events.
 */
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
	free_percpu(cpu_pmu->hw_events);
}
| 240 | |
| 241 | /* |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 242 | * PMU platform driver and devicetree bindings. |
| 243 | */ |
/*
 * Devicetree "compatible" strings mapped to the matching PMU init
 * function (stored in .data and invoked from cpu_pmu_device_probe()).
 */
static const struct of_device_id cpu_pmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
	{.compatible = "arm,arm1176-pmu",	.data = armv6_1176_pmu_init},
	{.compatible = "arm,arm1136-pmu",	.data = armv6_1136_pmu_init},
	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
	{},
};
| 260 | |
/* Legacy (non-DT) platform device names matched by this driver. */
static struct platform_device_id cpu_pmu_plat_device_ids[] = {
	{.name = "arm-pmu"},
	{.name = "armv6-pmu"},
	{.name = "armv7-pmu"},
	{.name = "xscale-pmu"},
	{},
};
| 268 | |
/*
 * CPUID-based fallback probe table, used when no DT match is found.
 * Entries are tried in order by probe_current_pmu().
 */
static const struct pmu_probe_info pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
	{ /* sentinel value */ }
};
| 280 | |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 281 | /* |
| 282 | * CPU PMU identification and probing. |
| 283 | */ |
Greg Kroah-Hartman | 351a102 | 2012-12-21 14:02:24 -0800 | [diff] [blame] | 284 | static int probe_current_pmu(struct arm_pmu *pmu) |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 285 | { |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 286 | int cpu = get_cpu(); |
Mark Rutland | 548a86c | 2014-05-23 18:11:14 +0100 | [diff] [blame] | 287 | unsigned int cpuid = read_cpuid_id(); |
Sudeep KarkadaNagesha | 513c99c | 2012-07-31 10:11:23 +0100 | [diff] [blame] | 288 | int ret = -ENODEV; |
Mark Rutland | 548a86c | 2014-05-23 18:11:14 +0100 | [diff] [blame] | 289 | const struct pmu_probe_info *info; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 290 | |
| 291 | pr_info("probing PMU on CPU %d\n", cpu); |
| 292 | |
Mark Rutland | 548a86c | 2014-05-23 18:11:14 +0100 | [diff] [blame] | 293 | for (info = pmu_probe_table; info->init != NULL; info++) { |
| 294 | if ((cpuid & info->mask) != info->cpuid) |
| 295 | continue; |
| 296 | ret = info->init(pmu); |
Russell King | af040ff | 2014-06-24 19:43:15 +0100 | [diff] [blame] | 297 | break; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 298 | } |
| 299 | |
| 300 | put_cpu(); |
Sudeep KarkadaNagesha | 513c99c | 2012-07-31 10:11:23 +0100 | [diff] [blame] | 301 | return ret; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 302 | } |
| 303 | |
Will Deacon | 9fd85eb | 2015-03-06 11:54:09 +0000 | [diff] [blame] | 304 | static int of_pmu_irq_cfg(struct platform_device *pdev) |
| 305 | { |
Will Deacon | 338d9dd | 2015-05-01 17:15:23 +0100 | [diff] [blame] | 306 | int i, irq; |
Will Deacon | 8d28128 | 2015-05-14 18:07:44 +0100 | [diff] [blame] | 307 | int *irqs; |
Will Deacon | 9fd85eb | 2015-03-06 11:54:09 +0000 | [diff] [blame] | 308 | |
Will Deacon | 338d9dd | 2015-05-01 17:15:23 +0100 | [diff] [blame] | 309 | /* Don't bother with PPIs; they're already affine */ |
| 310 | irq = platform_get_irq(pdev, 0); |
| 311 | if (irq >= 0 && irq_is_percpu(irq)) |
| 312 | return 0; |
| 313 | |
Will Deacon | 8d28128 | 2015-05-14 18:07:44 +0100 | [diff] [blame] | 314 | irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); |
| 315 | if (!irqs) |
| 316 | return -ENOMEM; |
| 317 | |
Will Deacon | 9fd85eb | 2015-03-06 11:54:09 +0000 | [diff] [blame] | 318 | for (i = 0; i < pdev->num_resources; ++i) { |
| 319 | struct device_node *dn; |
| 320 | int cpu; |
| 321 | |
| 322 | dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", |
| 323 | i); |
| 324 | if (!dn) { |
| 325 | pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", |
Will Deacon | 3b8786f | 2015-05-01 17:16:01 +0100 | [diff] [blame] | 326 | of_node_full_name(pdev->dev.of_node), i); |
Will Deacon | 9fd85eb | 2015-03-06 11:54:09 +0000 | [diff] [blame] | 327 | break; |
| 328 | } |
| 329 | |
| 330 | for_each_possible_cpu(cpu) |
| 331 | if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) |
| 332 | break; |
| 333 | |
| 334 | of_node_put(dn); |
| 335 | if (cpu >= nr_cpu_ids) { |
| 336 | pr_warn("Failed to find logical CPU for %s\n", |
| 337 | dn->name); |
| 338 | break; |
| 339 | } |
| 340 | |
| 341 | irqs[i] = cpu; |
| 342 | } |
| 343 | |
| 344 | if (i == pdev->num_resources) |
| 345 | cpu_pmu->irq_affinity = irqs; |
| 346 | else |
| 347 | kfree(irqs); |
| 348 | |
| 349 | return 0; |
| 350 | } |
| 351 | |
Greg Kroah-Hartman | 351a102 | 2012-12-21 14:02:24 -0800 | [diff] [blame] | 352 | static int cpu_pmu_device_probe(struct platform_device *pdev) |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 353 | { |
| 354 | const struct of_device_id *of_id; |
Stephen Boyd | 261521f | 2014-01-10 00:57:06 +0100 | [diff] [blame] | 355 | const int (*init_fn)(struct arm_pmu *); |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 356 | struct device_node *node = pdev->dev.of_node; |
Sudeep KarkadaNagesha | 513c99c | 2012-07-31 10:11:23 +0100 | [diff] [blame] | 357 | struct arm_pmu *pmu; |
| 358 | int ret = -ENODEV; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 359 | |
| 360 | if (cpu_pmu) { |
Mark Rutland | 0f2a210 | 2014-10-23 15:59:35 +0100 | [diff] [blame] | 361 | pr_info("attempt to register multiple PMU devices!\n"); |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 362 | return -ENOSPC; |
| 363 | } |
| 364 | |
Sudeep KarkadaNagesha | 513c99c | 2012-07-31 10:11:23 +0100 | [diff] [blame] | 365 | pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL); |
| 366 | if (!pmu) { |
Mark Rutland | 0f2a210 | 2014-10-23 15:59:35 +0100 | [diff] [blame] | 367 | pr_info("failed to allocate PMU device!\n"); |
Sudeep KarkadaNagesha | 513c99c | 2012-07-31 10:11:23 +0100 | [diff] [blame] | 368 | return -ENOMEM; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 369 | } |
| 370 | |
Stephen Boyd | 3a3967e | 2014-02-07 21:01:20 +0000 | [diff] [blame] | 371 | cpu_pmu = pmu; |
| 372 | cpu_pmu->plat_device = pdev; |
| 373 | |
Sudeep KarkadaNagesha | 513c99c | 2012-07-31 10:11:23 +0100 | [diff] [blame] | 374 | if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { |
| 375 | init_fn = of_id->data; |
Will Deacon | 9fd85eb | 2015-03-06 11:54:09 +0000 | [diff] [blame] | 376 | |
| 377 | ret = of_pmu_irq_cfg(pdev); |
| 378 | if (!ret) |
| 379 | ret = init_fn(pmu); |
Sudeep KarkadaNagesha | 513c99c | 2012-07-31 10:11:23 +0100 | [diff] [blame] | 380 | } else { |
| 381 | ret = probe_current_pmu(pmu); |
| 382 | } |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 383 | |
Sudeep KarkadaNagesha | 513c99c | 2012-07-31 10:11:23 +0100 | [diff] [blame] | 384 | if (ret) { |
Mark Rutland | 0f2a210 | 2014-10-23 15:59:35 +0100 | [diff] [blame] | 385 | pr_info("failed to probe PMU!\n"); |
Mark Rutland | 76b8a0e | 2013-01-18 13:42:58 +0000 | [diff] [blame] | 386 | goto out_free; |
Sudeep KarkadaNagesha | 513c99c | 2012-07-31 10:11:23 +0100 | [diff] [blame] | 387 | } |
| 388 | |
Mark Rutland | abdf655 | 2014-10-21 14:11:23 +0100 | [diff] [blame] | 389 | ret = cpu_pmu_init(cpu_pmu); |
| 390 | if (ret) |
| 391 | goto out_free; |
| 392 | |
Mark Rutland | 67b4305 | 2012-09-12 10:53:23 +0100 | [diff] [blame] | 393 | ret = armpmu_register(cpu_pmu, -1); |
Mark Rutland | abdf655 | 2014-10-21 14:11:23 +0100 | [diff] [blame] | 394 | if (ret) |
| 395 | goto out_destroy; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 396 | |
Mark Rutland | abdf655 | 2014-10-21 14:11:23 +0100 | [diff] [blame] | 397 | return 0; |
Mark Rutland | 76b8a0e | 2013-01-18 13:42:58 +0000 | [diff] [blame] | 398 | |
Mark Rutland | abdf655 | 2014-10-21 14:11:23 +0100 | [diff] [blame] | 399 | out_destroy: |
| 400 | cpu_pmu_destroy(cpu_pmu); |
Mark Rutland | 76b8a0e | 2013-01-18 13:42:58 +0000 | [diff] [blame] | 401 | out_free: |
Mark Rutland | 0f2a210 | 2014-10-23 15:59:35 +0100 | [diff] [blame] | 402 | pr_info("failed to register PMU devices!\n"); |
Mark Rutland | 76b8a0e | 2013-01-18 13:42:58 +0000 | [diff] [blame] | 403 | kfree(pmu); |
| 404 | return ret; |
Will Deacon | 5505b20 | 2012-07-29 13:09:14 +0100 | [diff] [blame] | 405 | } |
| 406 | |
/* Platform driver matching both DT ("of_match_table") and legacy IDs. */
static struct platform_driver cpu_pmu_driver = {
	.driver		= {
		.name	= "arm-pmu",
		.pm	= &armpmu_dev_pm_ops,
		.of_match_table = cpu_pmu_of_device_ids,
	},
	.probe		= cpu_pmu_device_probe,
	.id_table	= cpu_pmu_plat_device_ids,
};
| 416 | |
/* Register the CPU PMU platform driver at device_initcall time. */
static int __init register_pmu_driver(void)
{
	return platform_driver_register(&cpu_pmu_driver);
}
device_initcall(register_pmu_driver);