blob: a9b0ee408fbd44232644ade36fd86846d2d4c243 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 */
7
8#ifndef __ARM_PMU_H__
9#define __ARM_PMU_H__
10
Rabin Vincent0e25a5c2011-02-08 09:24:36 +053011#include <linux/interrupt.h>
Mark Rutland0ce47082011-05-19 10:07:57 +010012#include <linux/perf_event.h>
Mark Rutland167e6142017-10-09 17:09:05 +010013#include <linux/platform_device.h>
Mark Rutland86cdd722016-09-09 14:08:26 +010014#include <linux/sysfs.h>
Mark Rutland548a86c2014-05-23 18:11:14 +010015#include <asm/cputype.h>
16
Mark Rutlandfa8ad782015-07-06 12:23:53 +010017#ifdef CONFIG_ARM_PMU
Mark Rutland0ce47082011-05-19 10:07:57 +010018
/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

/*
 * ARM PMU hw_event flags
 */
/* Event uses a 64bit counter */
#define ARMPMU_EVT_64BIT		1

/* Sentinels marking table entries with no hardware event mapping. */
#define HW_OP_UNSUPPORTED		0xFFFF
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF
33
/*
 * Designated-initializer helpers that mark every entry of a PMU's
 * event map / cache event map as unsupported; drivers then override
 * only the events their hardware actually implements.
 */
#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}
43
Mark Rutland0ce47082011-05-19 10:07:57 +010044/* The events for a given PMU register set. */
45struct pmu_hw_events {
46 /*
47 * The events that are active on the PMU for the given index.
48 */
Mark Rutlanda4560842014-05-13 19:08:19 +010049 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
Mark Rutland0ce47082011-05-19 10:07:57 +010050
51 /*
52 * A 1 bit for an index indicates that the counter is being used for
53 * an event. A 0 means that the counter can be used.
54 */
Mark Rutlanda4560842014-05-13 19:08:19 +010055 DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
Mark Rutland0ce47082011-05-19 10:07:57 +010056
57 /*
58 * Hardware lock to serialize accesses to PMU registers. Needed for the
59 * read/modify/write sequences.
60 */
61 raw_spinlock_t pmu_lock;
Mark Rutland5ebd9202014-05-13 19:46:10 +010062
63 /*
64 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
65 * already have to allocate this struct per cpu.
66 */
67 struct arm_pmu *percpu_pmu;
Mark Rutland7ed98e02017-03-10 10:46:14 +000068
69 int irq;
Mark Rutland0ce47082011-05-19 10:07:57 +010070};
71
/* Indices into arm_pmu::attr_groups (array is NULL-terminated). */
enum armpmu_attr_groups {
	ARMPMU_ATTR_GROUP_COMMON,
	ARMPMU_ATTR_GROUP_EVENTS,
	ARMPMU_ATTR_GROUP_FORMATS,
	ARMPMU_NR_ATTR_GROUPS
};
78
Mark Rutland0ce47082011-05-19 10:07:57 +010079struct arm_pmu {
80 struct pmu pmu;
Mark Rutlandcc88116d2015-05-13 17:12:25 +010081 cpumask_t supported_cpus;
Will Deacon4295b892012-07-06 15:45:00 +010082 char *name;
Mark Rutland0788f1e2018-05-10 11:35:15 +010083 irqreturn_t (*handle_irq)(struct arm_pmu *pmu);
Sudeep KarkadaNageshaed6f2a52012-07-30 12:00:02 +010084 void (*enable)(struct perf_event *event);
85 void (*disable)(struct perf_event *event);
Mark Rutland0ce47082011-05-19 10:07:57 +010086 int (*get_event_idx)(struct pmu_hw_events *hw_events,
Sudeep KarkadaNageshaed6f2a52012-07-30 12:00:02 +010087 struct perf_event *event);
Stephen Boydeab443e2014-02-07 21:01:22 +000088 void (*clear_event_idx)(struct pmu_hw_events *hw_events,
89 struct perf_event *event);
Mark Rutland0ce47082011-05-19 10:07:57 +010090 int (*set_event_filter)(struct hw_perf_event *evt,
91 struct perf_event_attr *attr);
Suzuki K Poulose3a952002018-07-10 09:57:59 +010092 u64 (*read_counter)(struct perf_event *event);
93 void (*write_counter)(struct perf_event *event, u64 val);
Sudeep KarkadaNageshaed6f2a52012-07-30 12:00:02 +010094 void (*start)(struct arm_pmu *);
95 void (*stop)(struct arm_pmu *);
Mark Rutland0ce47082011-05-19 10:07:57 +010096 void (*reset)(void *);
97 int (*map_event)(struct perf_event *event);
Will Deaconca2b4972018-10-05 13:24:36 +010098 int (*filter_match)(struct perf_event *event);
Mark Rutland0ce47082011-05-19 10:07:57 +010099 int num_events;
Martin Fuzzey8d1a0ae2016-01-13 23:36:26 -0500100 bool secure_access; /* 32-bit ARM only */
Will Deacon342e53b2018-10-05 13:28:07 +0100101#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
Ashok Kumar4b1a9e62016-04-21 05:58:44 -0700102 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
Will Deacon342e53b2018-10-05 13:28:07 +0100103#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
104 DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
Mark Rutland0ce47082011-05-19 10:07:57 +0100105 struct platform_device *plat_device;
Mark Rutland11679252014-05-13 19:36:31 +0100106 struct pmu_hw_events __percpu *hw_events;
Sebastian Andrzej Siewior6e103c02016-08-17 19:14:20 +0200107 struct hlist_node node;
Lorenzo Pieralisida4e4f12016-02-23 18:22:39 +0000108 struct notifier_block cpu_pm_nb;
Mark Rutland86cdd722016-09-09 14:08:26 +0100109 /* the attr_groups array must be NULL-terminated */
110 const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
Mark Rutland45736a72017-04-11 09:39:55 +0100111
112 /* Only to be used by ACPI probing code */
113 unsigned long acpi_cpuid;
Mark Rutland0ce47082011-05-19 10:07:57 +0100114};
115
116#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
117
Sudeep KarkadaNageshaed6f2a52012-07-30 12:00:02 +0100118u64 armpmu_event_update(struct perf_event *event);
Mark Rutland0ce47082011-05-19 10:07:57 +0100119
Sudeep KarkadaNageshaed6f2a52012-07-30 12:00:02 +0100120int armpmu_event_set_period(struct perf_event *event);
Mark Rutland0ce47082011-05-19 10:07:57 +0100121
Will Deacon6dbc0022012-07-29 12:36:28 +0100122int armpmu_map_event(struct perf_event *event,
123 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
124 const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
125 [PERF_COUNT_HW_CACHE_OP_MAX]
126 [PERF_COUNT_HW_CACHE_RESULT_MAX],
127 u32 raw_event_mask);
128
/* Driver init callback invoked once a matching PMU has been identified. */
typedef int (*armpmu_init_fn)(struct arm_pmu *);

/* One CPUID-match entry in a driver's probe table. */
struct pmu_probe_info {
	unsigned int cpuid;	/* expected (masked) CPUID value */
	unsigned int mask;	/* bits of the CPUID to compare */
	armpmu_init_fn init;	/* init function for a match */
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

/* Match on the CPU part number only. */
#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

/* Match an Intel XScale implementer with the given PMU version. */
#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
151
Mark Rutlandcfdad292015-05-26 17:23:35 +0100152int arm_pmu_device_probe(struct platform_device *pdev,
153 const struct of_device_id *of_table,
154 const struct pmu_probe_info *probe_table);
155
Mark Rutland45736a72017-04-11 09:39:55 +0100156#ifdef CONFIG_ACPI
157int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
158#else
159static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
160#endif
161
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irq(int irq, int cpu);
void armpmu_free_irq(int irq, int cpu);

/* Platform-device name used by the ARMv8 PMUv3 driver (and ACPI probing). */
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
171
Mark Rutlandfa8ad782015-07-06 12:23:53 +0100172#endif /* CONFIG_ARM_PMU */
Mark Rutland0ce47082011-05-19 10:07:57 +0100173
Jamie Iles0f4f0672010-02-02 20:23:15 +0100174#endif /* __ARM_PMU_H__ */