/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>

/*
 * Types of PMUs that can be accessed directly and require mutual
 * exclusion between profiling tools.
 */
enum arm_pmu_type {
        ARM_PMU_DEVICE_CPU      = 0,
        ARM_NUM_PMU_DEVICES,
};

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *      interrupt and passed the address of the low-level handler,
 *      and can be used to implement any platform-specific handling
 *      before or after calling it.
 * @enable_irq: an optional handler which will be called after
 *      request_irq and can be used to perform any platform-specific
 *      IRQ enablement
 * @disable_irq: an optional handler which will be called before
 *      free_irq and can be used to perform any platform-specific
 *      IRQ disablement
 */
struct arm_pmu_platdata {
        irqreturn_t (*handle_irq)(int irq, void *dev,
                                  irq_handler_t pmu_handler);
        void (*enable_irq)(int irq);
        void (*disable_irq)(int irq);
};

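/*
 * Example (illustrative sketch, not part of this interface): board code
 * can supply an arm_pmu_platdata as the platform_data of its "arm-pmu"
 * platform device to hook platform-specific IRQ setup and teardown. The
 * "myboard_*" names and the system-controller register are hypothetical.
 *
 *      static void myboard_pmu_unmask_irq(int irq)
 *      {
 *              writel(1, MYBOARD_SYSCTL_PMU_IRQ_EN);
 *      }
 *
 *      static void myboard_pmu_mask_irq(int irq)
 *      {
 *              writel(0, MYBOARD_SYSCTL_PMU_IRQ_EN);
 *      }
 *
 *      static struct arm_pmu_platdata myboard_pmu_platdata = {
 *              .enable_irq     = myboard_pmu_unmask_irq,
 *              .disable_irq    = myboard_pmu_mask_irq,
 *      };
 *
 *      static struct platform_device myboard_pmu_device = {
 *              .name   = "arm-pmu",
 *              .id     = ARM_PMU_DEVICE_CPU,
 *              .dev    = {
 *                      .platform_data = &myboard_pmu_platdata,
 *              },
 *      };
 */
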
#ifdef CONFIG_CPU_HAS_PMU

/**
 * reserve_pmu() - reserve the hardware performance counters
 *
 * Reserve the hardware performance counters in the system for exclusive use.
 * Returns 0 on success or -EBUSY if the lock is already held.
 */
extern int
reserve_pmu(enum arm_pmu_type type);

/**
 * release_pmu() - Relinquish control of the performance counters
 *
 * Release the performance counters and allow someone else to use them.
 */
extern void
release_pmu(enum arm_pmu_type type);

#else /* CONFIG_CPU_HAS_PMU */

#include <linux/err.h>

static inline int
reserve_pmu(enum arm_pmu_type type)
{
        return -ENODEV;
}

static inline void
release_pmu(enum arm_pmu_type type)     { }

#endif /* CONFIG_CPU_HAS_PMU */

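/*
 * Example (illustrative sketch): a driver that programs the CPU PMU
 * directly is expected to bracket its use of the counters with the
 * reservation API above so that it cannot race with perf. The
 * "myprof_*" names are hypothetical and counter programming is elided.
 *
 *      static int myprof_start(void)
 *      {
 *              int err;
 *
 *              err = reserve_pmu(ARM_PMU_DEVICE_CPU);
 *              if (err)
 *                      return err;
 *
 *              ... program and start the counters ...
 *              return 0;
 *      }
 *
 *      static void myprof_stop(void)
 *      {
 *              ... stop the counters ...
 *              release_pmu(ARM_PMU_DEVICE_CPU);
 *      }
 */
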
#ifdef CONFIG_HW_PERF_EVENTS

/* The events for a given PMU register set. */
struct pmu_hw_events {
        /*
         * The events that are active on the PMU for the given index.
         */
        struct perf_event       **events;

        /*
         * A 1 bit for an index indicates that the counter is being used for
         * an event. A 0 means that the counter can be used.
         */
        unsigned long           *used_mask;

        /*
         * Hardware lock to serialize accesses to PMU registers. Needed for the
         * read/modify/write sequences.
         */
        raw_spinlock_t          pmu_lock;
};

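/*
 * Example (illustrative sketch): a backend's get_event_idx() callback
 * typically claims a free counter by scanning used_mask, e.g. as below.
 * MYPMU_NUM_COUNTERS and the "mypmu_" prefix are hypothetical; real
 * backends also special-case the cycle counter before falling back to
 * the general-purpose counters.
 *
 *      static int mypmu_get_event_idx(struct pmu_hw_events *hw_events,
 *                                     struct hw_perf_event *hwc)
 *      {
 *              int idx;
 *
 *              for (idx = 0; idx < MYPMU_NUM_COUNTERS; idx++) {
 *                      if (!test_and_set_bit(idx, hw_events->used_mask))
 *                              return idx;
 *              }
 *
 *              return -EAGAIN;
 *      }
 */
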
struct arm_pmu {
        struct pmu      pmu;
        enum arm_perf_pmu_ids id;
        enum arm_pmu_type type;
        cpumask_t       active_irqs;
        const char      *name;
        irqreturn_t     (*handle_irq)(int irq_num, void *dev);
        void            (*enable)(struct hw_perf_event *evt, int idx);
        void            (*disable)(struct hw_perf_event *evt, int idx);
        int             (*get_event_idx)(struct pmu_hw_events *hw_events,
                                         struct hw_perf_event *hwc);
        int             (*set_event_filter)(struct hw_perf_event *evt,
                                            struct perf_event_attr *attr);
        u32             (*read_counter)(int idx);
        void            (*write_counter)(int idx, u32 val);
        void            (*start)(void);
        void            (*stop)(void);
        void            (*reset)(void *);
        int             (*map_event)(struct perf_event *event);
        int             num_events;
        atomic_t        active_events;
        struct mutex    reserve_mutex;
        u64             max_period;
        struct platform_device  *plat_device;
        struct pmu_hw_events    *(*get_hw_events)(void);
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);

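/*
 * Example (illustrative sketch): a PMU backend fills in an arm_pmu with
 * its low-level callbacks and hands it to armpmu_register(). The
 * "mypmu_*" callbacks and the constants are hypothetical; the CPU PMU
 * backends register with the PERF_TYPE_RAW type.
 *
 *      static struct arm_pmu mypmu = {
 *              .name           = "mypmu",
 *              .handle_irq     = mypmu_handle_irq,
 *              .enable         = mypmu_enable_event,
 *              .disable        = mypmu_disable_event,
 *              .read_counter   = mypmu_read_counter,
 *              .write_counter  = mypmu_write_counter,
 *              .get_event_idx  = mypmu_get_event_idx,
 *              .start          = mypmu_start,
 *              .stop           = mypmu_stop,
 *              .map_event      = mypmu_map_event,
 *              .num_events     = 4,
 *              .max_period     = (1LLU << 32) - 1,
 *      };
 *
 *      static int __init mypmu_init(void)
 *      {
 *              return armpmu_register(&mypmu, "mypmu", PERF_TYPE_RAW);
 *      }
 *      early_initcall(mypmu_init);
 */
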
u64 armpmu_event_update(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx);

int armpmu_event_set_period(struct perf_event *event,
                            struct hw_perf_event *hwc,
                            int idx);

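/*
 * Example (illustrative sketch): inside a backend's overflow interrupt
 * handler, each overflowed counter is typically folded back into perf
 * with the two helpers above, roughly as follows. The overflow test is
 * hypothetical and sample delivery via perf_event_overflow() is elided.
 *
 *      for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 *              struct perf_event *event = cpuc->events[idx];
 *              struct hw_perf_event *hwc;
 *
 *              if (!event || !mypmu_counter_has_overflowed(pmnc, idx))
 *                      continue;
 *
 *              hwc = &event->hw;
 *              armpmu_event_update(event, hwc, idx);
 *              if (!armpmu_event_set_period(event, hwc, idx))
 *                      continue;
 *      }
 */
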
#endif /* CONFIG_HW_PERF_EVENTS */

#endif /* __ARM_PMU_H__ */