blob: e18843809eec0f598018919e002dfb8dee58360d [file] [log] [blame]
/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>

#include <asm/cputype.h>

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
};

#ifdef CONFIG_ARM_PMU

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

/* Sentinel: a generic perf event with no mapping on this PMU. */
#define HW_OP_UNSUPPORTED		0xFFFF
/* Shorthand for the PERF_COUNT_HW_CACHE_* enumeration constants. */
#define C(_x)			PERF_COUNT_HW_CACHE_##_x
/* Sentinel: a cache (type, op, result) combination with no mapping. */
#define CACHE_OP_UNSUPPORTED		0xFFFF

/*
 * Designated-initializer helpers for PMU event map tables: initialise
 * every entry to "unsupported" first, then a specific PMU driver
 * overrides only the entries its hardware actually implements.
 */
#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;

	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu		*percpu_pmu;
};

/*
 * struct arm_pmu - hardware-specific description of one class of ARM PMU.
 *
 * Embeds the core perf 'struct pmu' (recover the container with
 * to_arm_pmu()) and carries the driver callbacks the ARM perf core
 * invokes to program the counters, plus shared bookkeeping state.
 */
struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
	/* CPUs this PMU instance is able to monitor */
	cpumask_t	supported_cpus;
	int		*irq_affinity;
	char		*name;
	/* Low-level counter-overflow interrupt handler. */
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	/* Allocate/release a hardware counter index for @event. */
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
					   struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u32 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
	void		(*free_irq)(struct arm_pmu *);
	/* Map a perf event config to a hardware event number, if supported. */
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
	bool		secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
	/* One bit per PMUv3 common event advertised by PMCEID registers. */
	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	struct platform_device	*plat_device;
	/* Per-cpu counter state; see struct pmu_hw_events. */
	struct pmu_hw_events	__percpu *hw_events;
	struct list_head	entry;
	struct notifier_block	cpu_pm_nb;
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Read the counter and accumulate the delta into the perf event count. */
u64 armpmu_event_update(struct perf_event *event);

/* Program the counter so it overflows after the event's sample period. */
int armpmu_event_set_period(struct perf_event *event);

/*
 * Translate a perf event's config into a hardware event number using the
 * supplied generic-event and cache-event lookup tables; raw events are
 * masked with @raw_event_mask and passed through.
 */
int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);

/*
 * struct pmu_probe_info - match a CPU ID to a PMU driver init function.
 *
 * @cpuid: CPU ID value to match (after masking).
 * @mask:  bits of the CPU ID register that are significant for the match.
 * @init:  driver initialisation routine to run for a matching CPU.
 */
struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	int (*init)(struct arm_pmu *);
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

/* Match on the CPU "part number" field only. */
#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

/* XScale is identified by implementer (bits 31:24) plus its arch field. */
#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)

/*
 * Common platform-device probe: matches the device against the DT
 * @of_table or, failing that, the CPUID-based @probe_table, then
 * registers the resulting PMU with the perf core.
 */
int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table);

#endif /* CONFIG_ARM_PMU */

#endif /* __ARM_PMU_H__ */