/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 *
 * @irq_flags: if non-zero, these flags will be passed to request_irq
 *             when requesting interrupts for this PMU device.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	unsigned long irq_flags;
};
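
/*
 * Illustrative sketch, not part of the original header: a board file could
 * supply platform data so that platform specific handling runs around the
 * low level handler and so that extra flags are passed to request_irq().
 * The names board_pmu_handler, board_pmu_quirk_enter/exit and
 * board_pmu_platdata are hypothetical.
 *
 *	static irqreturn_t board_pmu_handler(int irq, void *dev,
 *					     irq_handler_t pmu_handler)
 *	{
 *		irqreturn_t ret;
 *
 *		board_pmu_quirk_enter();
 *		ret = pmu_handler(irq, dev);
 *		board_pmu_quirk_exit();
 *
 *		return ret;
 *	}
 *
 *	static struct arm_pmu_platdata board_pmu_platdata = {
 *		.handle_irq	= board_pmu_handler,
 *		.irq_flags	= IRQF_NOBALANCING | IRQF_NO_THREAD,
 *	};
 */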

#ifdef CONFIG_ARM_PMU

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

#define HW_OP_UNSUPPORTED		0xFFFF
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}
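
/*
 * Illustrative sketch, not part of the original header: the
 * *_ALL_UNSUPPORTED macros expand to designated initialisers, so a CPU PMU
 * driver can mark every entry unsupported and then override only the events
 * its hardware implements. The table names and event encodings below are
 * hypothetical placeholders.
 *
 *	static const unsigned my_perf_map[PERF_COUNT_HW_MAX] = {
 *		PERF_MAP_ALL_UNSUPPORTED,
 *		[PERF_COUNT_HW_CPU_CYCLES]	= 0x11,
 *		[PERF_COUNT_HW_INSTRUCTIONS]	= 0x08,
 *	};
 *
 *	static const unsigned my_cache_map[PERF_COUNT_HW_CACHE_MAX]
 *					  [PERF_COUNT_HW_CACHE_OP_MAX]
 *					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 *		PERF_CACHE_MAP_ALL_UNSUPPORTED,
 *		[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= 0x04,
 *		[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= 0x03,
 *	};
 */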

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;

	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu		*percpu_pmu;

	int irq;
};
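
/*
 * Illustrative sketch, not part of the original header: a driver's
 * get_event_idx callback typically claims a free counter by atomically
 * setting its bit in used_mask; a real driver bounds the search by the
 * number of counters it actually probed rather than ARMPMU_MAX_HWEVENTS.
 * The function name is hypothetical.
 *
 *	static int my_pmu_get_event_idx(struct pmu_hw_events *hw_events,
 *					struct perf_event *event)
 *	{
 *		int idx;
 *
 *		for (idx = 0; idx < ARMPMU_MAX_HWEVENTS; idx++)
 *			if (!test_and_set_bit(idx, hw_events->used_mask))
 *				return idx;
 *
 *		return -EAGAIN;
 *	}
 */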

enum armpmu_attr_groups {
	ARMPMU_ATTR_GROUP_COMMON,
	ARMPMU_ATTR_GROUP_EVENTS,
	ARMPMU_ATTR_GROUP_FORMATS,
	ARMPMU_NR_ATTR_GROUPS
};

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
	cpumask_t	supported_cpus;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
					   struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u32 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	u64		max_period;
	bool		secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	struct platform_device	*plat_device;
	struct pmu_hw_events	__percpu *hw_events;
	struct hlist_node	node;
	struct notifier_block	cpu_pm_nb;
	/* the attr_groups array must be NULL-terminated */
	const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];

	/* Only to be used by ACPI probing code */
	unsigned long acpi_cpuid;
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);

int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);
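
/*
 * Illustrative sketch, not part of the original header: a driver's map_event
 * callback usually just forwards to armpmu_map_event() with its own tables
 * and raw event mask. The names my_pmu_map_event, my_perf_map and
 * my_cache_map are hypothetical.
 *
 *	static int my_pmu_map_event(struct perf_event *event)
 *	{
 *		return armpmu_map_event(event, &my_perf_map, &my_cache_map,
 *					0xFF);
 *	}
 */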

typedef int (*armpmu_init_fn)(struct arm_pmu *);

struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	armpmu_init_fn init;
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table);
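
/*
 * Illustrative sketch, not part of the original header: a platform driver
 * describes the CPUs it supports in a pmu_probe_info table terminated by an
 * empty entry, and hands that table plus its OF match table to
 * arm_pmu_device_probe() from its probe routine. The init function, match
 * table and CPU part below are hypothetical (ARM_CPU_PART_CORTEX_A9 is
 * assumed to come from <asm/cputype.h>).
 *
 *	static const struct pmu_probe_info my_pmu_probe_table[] = {
 *		ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, my_a9_pmu_init),
 *		{ },
 *	};
 *
 *	static int my_pmu_probe(struct platform_device *pdev)
 *	{
 *		return arm_pmu_device_probe(pdev, my_pmu_of_match,
 *					    my_pmu_probe_table);
 *	}
 */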

#ifdef CONFIG_ACPI
int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
#else
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif

/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irqs(struct arm_pmu *armpmu);
void armpmu_free_irqs(struct arm_pmu *armpmu);
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);

#define ARMV8_PMU_PDEV_NAME "armv8-pmu"

#endif /* CONFIG_ARM_PMU */

#endif /* __ARM_PMU_H__ */