/*
 *  linux/arch/arm/include/asm/pmu.h
 *
 *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
};
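
/*
 * Illustrative sketch only, not part of this header's API: a board file
 * could hook handle_irq to bracket the core PMU interrupt handler with
 * platform specific work. The names board_pmu_handle_irq() and
 * board_pmu_platdata below are hypothetical.
 *
 *	static irqreturn_t board_pmu_handle_irq(int irq, void *dev,
 *						irq_handler_t pmu_handler)
 *	{
 *		irqreturn_t ret;
 *
 *		ret = pmu_handler(irq, dev);
 *		return ret;
 *	}
 *
 *	static struct arm_pmu_platdata board_pmu_platdata = {
 *		.handle_irq	= board_pmu_handle_irq,
 *	};
 */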

#ifdef CONFIG_ARM_PMU

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

#define HW_OP_UNSUPPORTED		0xFFFF
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}
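
/*
 * Illustrative sketch only: a CPU PMU driver would typically start its
 * generic event map from PERF_MAP_ALL_UNSUPPORTED and then override the
 * entries it actually implements with later designated initializers. The
 * table name and the event encodings (0x11, 0x08) below are hypothetical.
 *
 *	static const unsigned example_perf_map[PERF_COUNT_HW_MAX] = {
 *		PERF_MAP_ALL_UNSUPPORTED,
 *		[PERF_COUNT_HW_CPU_CYCLES]	= 0x11,
 *		[PERF_COUNT_HW_INSTRUCTIONS]	= 0x08,
 *	};
 */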

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;

	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu		*percpu_pmu;

	int irq;
};
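
/*
 * Illustrative sketch only: a driver's get_event_idx() callback typically
 * claims a free counter by atomically setting its bit in used_mask and
 * returns -EAGAIN when every counter is busy. The function below is a
 * hypothetical, simplified example.
 *
 *	static int example_get_event_idx(struct pmu_hw_events *hw_events,
 *					 struct perf_event *event)
 *	{
 *		int idx;
 *
 *		for (idx = 0; idx < ARMPMU_MAX_HWEVENTS; idx++) {
 *			if (!test_and_set_bit(idx, hw_events->used_mask))
 *				return idx;
 *		}
 *
 *		return -EAGAIN;
 *	}
 */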

enum armpmu_attr_groups {
	ARMPMU_ATTR_GROUP_COMMON,
	ARMPMU_ATTR_GROUP_EVENTS,
	ARMPMU_ATTR_GROUP_FORMATS,
	ARMPMU_NR_ATTR_GROUPS
};

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
	cpumask_t	supported_cpus;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u32 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	u64		max_period;
	bool		secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	struct platform_device	*plat_device;
	struct pmu_hw_events	__percpu *hw_events;
	struct hlist_node	node;
	struct notifier_block	cpu_pm_nb;
	/* the attr_groups array must be NULL-terminated */
	const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];

	/* Only to be used by ACPI probing code */
	unsigned long acpi_cpuid;
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
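
/*
 * Illustrative sketch only: to_arm_pmu() recovers the enclosing arm_pmu
 * from the embedded struct pmu that the core perf layer hands back to
 * driver callbacks. The callback below is hypothetical.
 *
 *	static u64 example_read_event_count(struct perf_event *event)
 *	{
 *		struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 *
 *		return armpmu->read_counter(event);
 *	}
 */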

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);

int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);
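
/*
 * Illustrative sketch only: a driver's map_event() callback usually just
 * forwards to armpmu_map_event() with its own generic map, cache map and
 * raw event mask. The names example_perf_map and example_cache_map and
 * the 0xFF mask below are hypothetical.
 *
 *	static int example_map_event(struct perf_event *event)
 *	{
 *		return armpmu_map_event(event, &example_perf_map,
 *					&example_cache_map, 0xFF);
 *	}
 */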

typedef int (*armpmu_init_fn)(struct arm_pmu *);

struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	armpmu_init_fn init;
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table);
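
/*
 * Illustrative sketch only: a CPU PMU platform driver typically builds a
 * CPUID-keyed probe table with ARM_PMU_PROBE() and passes it, together
 * with its DT match table, to arm_pmu_device_probe() from its probe
 * routine. The names example_pmu_probe_table, example_pmu_of_device_ids
 * and cortex_a9_pmu_init below are hypothetical.
 *
 *	static const struct pmu_probe_info example_pmu_probe_table[] = {
 *		ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, cortex_a9_pmu_init),
 *		{ },
 *	};
 *
 *	static int example_pmu_device_probe(struct platform_device *pdev)
 *	{
 *		return arm_pmu_device_probe(pdev, example_pmu_of_device_ids,
 *					    example_pmu_probe_table);
 *	}
 */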

#ifdef CONFIG_ACPI
int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
#else
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif

/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irqs(struct arm_pmu *armpmu);
void armpmu_free_irqs(struct arm_pmu *armpmu);
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);

#define ARMV8_PMU_PDEV_NAME "armv8-pmu"

#endif /* CONFIG_ARM_PMU */

#endif /* __ARM_PMU_H__ */