/*
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "bimc-bwmon: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/sizes.h>
#include "governor_bw_hwmon.h"

#define GLB_INT_STATUS(m)	((m)->global_base + 0x100)
#define GLB_INT_CLR(m)		((m)->global_base + 0x108)
#define GLB_INT_EN(m)		((m)->global_base + 0x10C)
#define MON_INT_STATUS(m)	((m)->base + 0x100)
#define MON_INT_STATUS_MASK	0x03
#define MON2_INT_STATUS_MASK	0xA0
#define MON2_INT_DISABLE_MASK	0xF0
#define MON2_INT_STATUS_SHIFT	4
#define MON_INT_CLR(m)		((m)->base + 0x108)
#define MON_INT_EN(m)		((m)->base + 0x10C)
#define MON_INT_ENABLE		0x1
#define MON_EN(m)		((m)->base + 0x280)
#define MON_CLEAR(m)		((m)->base + 0x284)
#define MON_CNT(m)		((m)->base + 0x288)
#define MON_THRES(m)		((m)->base + 0x290)
#define MON_MASK(m)		((m)->base + 0x298)
#define MON_MATCH(m)		((m)->base + 0x29C)

#define MON2_EN(m)		((m)->base + 0x2A0)
#define MON2_CLEAR(m)		((m)->base + 0x2A4)
#define MON2_SW(m)		((m)->base + 0x2A8)
#define MON2_THRES_HI(m)	((m)->base + 0x2AC)
#define MON2_THRES_MED(m)	((m)->base + 0x2B0)
#define MON2_THRES_LO(m)	((m)->base + 0x2B4)
#define MON2_ZONE_ACTIONS(m)	((m)->base + 0x2B8)
#define MON2_ZONE_CNT_THRES(m)	((m)->base + 0x2BC)
#define MON2_BYTE_CNT(m)	((m)->base + 0x2D0)
#define MON2_WIN_TIMER(m)	((m)->base + 0x2D4)
#define MON2_ZONE_CNT(m)	((m)->base + 0x2D8)
#define MON2_ZONE_MAX(m, zone)	((m)->base + 0x2E0 + 0x4 * zone)

#define MON3_INT_STATUS(m)	((m)->base + 0x00)
#define MON3_INT_CLR(m)		((m)->base + 0x08)
#define MON3_INT_EN(m)		((m)->base + 0x0C)
#define MON3_INT_STATUS_MASK	0x0A
#define MON3_INT_DISABLE_MASK	0x0F
#define MON3_EN(m)		((m)->base + 0x10)
#define MON3_CLEAR(m)		((m)->base + 0x14)
#define MON3_MASK(m)		((m)->base + 0x18)
#define MON3_MATCH(m)		((m)->base + 0x1C)
#define MON3_SW(m)		((m)->base + 0x20)
#define MON3_THRES_HI(m)	((m)->base + 0x24)
#define MON3_THRES_MED(m)	((m)->base + 0x28)
#define MON3_THRES_LO(m)	((m)->base + 0x2C)
#define MON3_ZONE_ACTIONS(m)	((m)->base + 0x30)
#define MON3_ZONE_CNT_THRES(m)	((m)->base + 0x34)
#define MON3_BYTE_CNT(m)	((m)->base + 0x38)
#define MON3_WIN_TIMER(m)	((m)->base + 0x3C)
#define MON3_ZONE_CNT(m)	((m)->base + 0x40)
#define MON3_ZONE_MAX(m, zone)	((m)->base + 0x44 + 0x4 * zone)

enum mon_reg_type {
	MON1,
	MON2,
	MON3,
};

struct bwmon_spec {
	bool wrap_on_thres;
	bool overflow;
	bool throt_adj;
	bool hw_sampling;
	bool has_global_base;
	enum mon_reg_type reg_type;
};

struct bwmon {
	void __iomem *base;
	void __iomem *global_base;
	unsigned int mport;
	int irq;
	const struct bwmon_spec *spec;
	struct device *dev;
	struct bw_hwmon hw;
	u32 hw_timer_hz;
	u32 throttle_adj;
	u32 sample_size_ms;
	u32 intr_status;
	u8 count_shift;
	u32 thres_lim;
	u32 byte_mask;
	u32 byte_match;
};

#define to_bwmon(ptr)		container_of(ptr, struct bwmon, hw)

#define ENABLE_MASK		BIT(0)
#define THROTTLE_MASK		0x1F
#define THROTTLE_SHIFT		16

static DEFINE_SPINLOCK(glb_lock);

static __always_inline void mon_enable(struct bwmon *m, enum mon_reg_type type)
{
	switch (type) {
	case MON1:
		writel_relaxed(ENABLE_MASK | m->throttle_adj, MON_EN(m));
		break;
	case MON2:
		writel_relaxed(ENABLE_MASK | m->throttle_adj, MON2_EN(m));
		break;
	case MON3:
		writel_relaxed(ENABLE_MASK | m->throttle_adj, MON3_EN(m));
		break;
	}
}

static __always_inline void mon_disable(struct bwmon *m, enum mon_reg_type type)
{
	switch (type) {
	case MON1:
		writel_relaxed(m->throttle_adj, MON_EN(m));
		break;
	case MON2:
		writel_relaxed(m->throttle_adj, MON2_EN(m));
		break;
	case MON3:
		writel_relaxed(m->throttle_adj, MON3_EN(m));
		break;
	}
	/*
	 * Ensure mon_disable() completes before mon_irq_clear(). If the IRQ
	 * were cleared first and the counter happened to raise it again, the
	 * IRQ line would be left high with no one handling it.
	 */
	mb();
}

#define MON_CLEAR_BIT		0x1
#define MON_CLEAR_ALL_BIT	0x2
static __always_inline
void mon_clear(struct bwmon *m, bool clear_all, enum mon_reg_type type)
{
	switch (type) {
	case MON1:
		writel_relaxed(MON_CLEAR_BIT, MON_CLEAR(m));
		break;
	case MON2:
		if (clear_all)
			writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
		else
			writel_relaxed(MON_CLEAR_BIT, MON2_CLEAR(m));
		break;
	case MON3:
		if (clear_all)
			writel_relaxed(MON_CLEAR_ALL_BIT, MON3_CLEAR(m));
		else
			writel_relaxed(MON_CLEAR_BIT, MON3_CLEAR(m));
		break;
	}
	/*
	 * The counter clear and IRQ clear bits are not in the same 4KB
	 * region. So, we need to make sure the counter clear is completed
	 * before we try to clear the IRQ or do any other counter operations.
	 */
	mb();
}

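/*
 * Program the HW sample window, in HW timer ticks, used by MON2/MON3 when
 * hardware sampling is enabled. MON1 has no HW sampling window.
 */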
#define SAMPLE_WIN_LIM	0xFFFFF
static __always_inline
void mon_set_hw_sampling_window(struct bwmon *m, unsigned int sample_ms,
				enum mon_reg_type type)
{
	u32 rate;

	if (unlikely(sample_ms != m->sample_size_ms)) {
		rate = mult_frac(sample_ms, m->hw_timer_hz, MSEC_PER_SEC);
		m->sample_size_ms = sample_ms;
		if (unlikely(rate > SAMPLE_WIN_LIM)) {
			pr_warn("Sample window %u larger than hw limit: %u\n",
				rate, SAMPLE_WIN_LIM);
			rate = SAMPLE_WIN_LIM;
		}
		switch (type) {
		case MON1:
			WARN(1, "Invalid\n");
			return;
		case MON2:
			writel_relaxed(rate, MON2_SW(m));
			break;
		case MON3:
			writel_relaxed(rate, MON3_SW(m));
			break;
		}
	}
}

static void mon_glb_irq_enable(struct bwmon *m)
{
	u32 val;

	val = readl_relaxed(GLB_INT_EN(m));
	val |= 1 << m->mport;
	writel_relaxed(val, GLB_INT_EN(m));
}

static __always_inline
void mon_irq_enable(struct bwmon *m, enum mon_reg_type type)
{
	u32 val;

	spin_lock(&glb_lock);
	switch (type) {
	case MON1:
		mon_glb_irq_enable(m);
		val = readl_relaxed(MON_INT_EN(m));
		val |= MON_INT_ENABLE;
		writel_relaxed(val, MON_INT_EN(m));
		break;
	case MON2:
		mon_glb_irq_enable(m);
		val = readl_relaxed(MON_INT_EN(m));
		val |= MON2_INT_STATUS_MASK;
		writel_relaxed(val, MON_INT_EN(m));
		break;
	case MON3:
		val = readl_relaxed(MON3_INT_EN(m));
		val |= MON3_INT_STATUS_MASK;
		writel_relaxed(val, MON3_INT_EN(m));
		break;
	}
	spin_unlock(&glb_lock);
	/*
	 * Make sure the IRQ enable has reached both the local and global
	 * registers before returning, to avoid racing with other monitor
	 * calls.
	 */
	mb();
}

static void mon_glb_irq_disable(struct bwmon *m)
{
	u32 val;

	val = readl_relaxed(GLB_INT_EN(m));
	val &= ~(1 << m->mport);
	writel_relaxed(val, GLB_INT_EN(m));
}

static __always_inline
void mon_irq_disable(struct bwmon *m, enum mon_reg_type type)
{
	u32 val;

	spin_lock(&glb_lock);

	switch (type) {
	case MON1:
		mon_glb_irq_disable(m);
		val = readl_relaxed(MON_INT_EN(m));
		val &= ~MON_INT_ENABLE;
		writel_relaxed(val, MON_INT_EN(m));
		break;
	case MON2:
		mon_glb_irq_disable(m);
		val = readl_relaxed(MON_INT_EN(m));
		val &= ~MON2_INT_DISABLE_MASK;
		writel_relaxed(val, MON_INT_EN(m));
		break;
	case MON3:
		val = readl_relaxed(MON3_INT_EN(m));
		val &= ~MON3_INT_DISABLE_MASK;
		writel_relaxed(val, MON3_INT_EN(m));
		break;
	}
	spin_unlock(&glb_lock);
	/*
	 * Make sure the IRQ disable has reached both the local and global
	 * registers before returning, to avoid racing with other monitor
	 * calls.
	 */
	mb();
}

static __always_inline
unsigned int mon_irq_status(struct bwmon *m, enum mon_reg_type type)
{
	u32 mval;

	switch (type) {
	case MON1:
		mval = readl_relaxed(MON_INT_STATUS(m));
		dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
			readl_relaxed(GLB_INT_STATUS(m)));
		mval &= MON_INT_STATUS_MASK;
		break;
	case MON2:
		mval = readl_relaxed(MON_INT_STATUS(m));
		dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
			readl_relaxed(GLB_INT_STATUS(m)));
		mval &= MON2_INT_STATUS_MASK;
		mval >>= MON2_INT_STATUS_SHIFT;
		break;
	case MON3:
		mval = readl_relaxed(MON3_INT_STATUS(m));
		dev_dbg(m->dev, "IRQ status p:%x\n", mval);
		mval &= MON3_INT_STATUS_MASK;
		break;
	}

	return mval;
}

static void mon_glb_irq_clear(struct bwmon *m)
{
	/*
	 * Synchronize the local interrupt clear in mon_irq_clear()
	 * with the global interrupt clear here. Otherwise, the CPU
	 * may reorder the two writes and clear the global interrupt
	 * before the local interrupt, causing the global interrupt
	 * to be retriggered by the local interrupt still being high.
	 */
	mb();
	writel_relaxed(1 << m->mport, GLB_INT_CLR(m));
	/*
	 * Similarly, because the global registers are in a different
	 * region than the local registers, we need to ensure any register
	 * writes to enable the monitor after this call are ordered with the
	 * clearing here so that local writes don't happen before the
	 * interrupt is cleared.
	 */
	mb();
}

static __always_inline
void mon_irq_clear(struct bwmon *m, enum mon_reg_type type)
{
	switch (type) {
	case MON1:
		writel_relaxed(MON_INT_STATUS_MASK, MON_INT_CLR(m));
		mon_glb_irq_clear(m);
		break;
	case MON2:
		writel_relaxed(MON2_INT_STATUS_MASK, MON_INT_CLR(m));
		mon_glb_irq_clear(m);
		break;
	case MON3:
		writel_relaxed(MON3_INT_STATUS_MASK, MON3_INT_CLR(m));
		break;
	}
}

static int mon_set_throttle_adj(struct bw_hwmon *hw, uint adj)
{
	struct bwmon *m = to_bwmon(hw);

	if (adj > THROTTLE_MASK)
		return -EINVAL;

	adj = (adj & THROTTLE_MASK) << THROTTLE_SHIFT;
	m->throttle_adj = adj;

	return 0;
}

static u32 mon_get_throttle_adj(struct bw_hwmon *hw)
{
	struct bwmon *m = to_bwmon(hw);

	return m->throttle_adj >> THROTTLE_SHIFT;
}

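/*
 * The MON2/MON3 zone action register takes one action byte per zone; pack
 * the four per-zone actions below into a single 32-bit value.
 */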
#define ZONE1_SHIFT	8
#define ZONE2_SHIFT	16
#define ZONE3_SHIFT	24
#define ZONE0_ACTION	0x01	/* Increment zone 0 count */
#define ZONE1_ACTION	0x09	/* Increment zone 1 & clear lower zones */
#define ZONE2_ACTION	0x25	/* Increment zone 2 & clear lower zones */
#define ZONE3_ACTION	0x95	/* Increment zone 3 & clear lower zones */
static u32 calc_zone_actions(void)
{
	u32 zone_actions;

	zone_actions = ZONE0_ACTION;
	zone_actions |= ZONE1_ACTION << ZONE1_SHIFT;
	zone_actions |= ZONE2_ACTION << ZONE2_SHIFT;
	zone_actions |= ZONE3_ACTION << ZONE3_SHIFT;

	return zone_actions;
}

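/*
 * Zone count thresholds: zone 3 (high) interrupts after a single sample,
 * zone 1 (low) only after hw->down_cnt samples; zones 0 and 2 are set to
 * the maximum count threshold.
 */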
#define ZONE_CNT_LIM	0xFFU
#define UP_CNT_1	1
static u32 calc_zone_counts(struct bw_hwmon *hw)
{
	u32 zone_counts;

	zone_counts = ZONE_CNT_LIM;
	zone_counts |= min(hw->down_cnt, ZONE_CNT_LIM) << ZONE1_SHIFT;
	zone_counts |= ZONE_CNT_LIM << ZONE2_SHIFT;
	zone_counts |= UP_CNT_1 << ZONE3_SHIFT;

	return zone_counts;
}

#define MB_SHIFT	20

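/*
 * Convert a bandwidth in MB/s into the number of HW count units expected
 * over a window of 'ms' milliseconds, where one count unit is
 * (1 << shift) bytes.
 */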
static u32 mbps_to_count(unsigned long mbps, unsigned int ms, u8 shift)
{
	mbps *= ms;

	if (shift > MB_SHIFT)
		mbps >>= shift - MB_SHIFT;
	else
		mbps <<= MB_SHIFT - shift;

	return DIV_ROUND_UP(mbps, MSEC_PER_SEC);
}

/*
 * Define the 4 zones using HI, MED & LO thresholds:
 * Zone 0: byte count < THRES_LO
 * Zone 1: THRES_LO < byte count < THRES_MED
 * Zone 2: THRES_MED < byte count < THRES_HI
 * Zone 3: THRES_HI < byte count < THRES_LIM
 */
#define THRES_LIM(shift)	(0xFFFFFFFF >> shift)

static __always_inline
void set_zone_thres(struct bwmon *m, unsigned int sample_ms,
		    enum mon_reg_type type)
{
	struct bw_hwmon *hw = &m->hw;
	u32 hi, med, lo;
	u32 zone_cnt_thres = calc_zone_counts(hw);

	hi = mbps_to_count(hw->up_wake_mbps, sample_ms, m->count_shift);
	med = mbps_to_count(hw->down_wake_mbps, sample_ms, m->count_shift);
	lo = 0;

	if (unlikely((hi > m->thres_lim) || (med > hi) || (lo > med))) {
		pr_warn("Zone thres larger than hw limit: hi:%u med:%u lo:%u\n",
			hi, med, lo);
		hi = min(hi, m->thres_lim);
		med = min(med, hi - 1);
		lo = min(lo, med - 1);
	}

	switch (type) {
	case MON1:
		WARN(1, "Invalid\n");
		return;
	case MON2:
		writel_relaxed(hi, MON2_THRES_HI(m));
		writel_relaxed(med, MON2_THRES_MED(m));
		writel_relaxed(lo, MON2_THRES_LO(m));
		/* Set the zone count thresholds for interrupts */
		writel_relaxed(zone_cnt_thres, MON2_ZONE_CNT_THRES(m));
		break;
	case MON3:
		writel_relaxed(hi, MON3_THRES_HI(m));
		writel_relaxed(med, MON3_THRES_MED(m));
		writel_relaxed(lo, MON3_THRES_LO(m));
		/* Set the zone count thresholds for interrupts */
		writel_relaxed(zone_cnt_thres, MON3_ZONE_CNT_THRES(m));
		break;
	}

	dev_dbg(m->dev, "Thres: hi:%u med:%u lo:%u\n", hi, med, lo);
	dev_dbg(m->dev, "Zone Count Thres: %0x\n", zone_cnt_thres);
}

static __always_inline
void mon_set_zones(struct bwmon *m, unsigned int sample_ms,
		   enum mon_reg_type type)
{
	mon_set_hw_sampling_window(m, sample_ms, type);
	set_zone_thres(m, sample_ms, type);
}

static void mon_set_limit(struct bwmon *m, u32 count)
{
	writel_relaxed(count, MON_THRES(m));
	dev_dbg(m->dev, "Thres: %08x\n", count);
}

static u32 mon_get_limit(struct bwmon *m)
{
	return readl_relaxed(MON_THRES(m));
}

#define THRES_HIT(status)	(status & BIT(0))
#define OVERFLOW(status)	(status & BIT(1))
static unsigned long mon_get_count1(struct bwmon *m)
{
	unsigned long count, status;

	count = readl_relaxed(MON_CNT(m));
	status = mon_irq_status(m, MON1);

	dev_dbg(m->dev, "Counter: %08lx\n", count);

	if (OVERFLOW(status) && m->spec->overflow)
		count += 0xFFFFFFFF;
	if (THRES_HIT(status) && m->spec->wrap_on_thres)
		count += mon_get_limit(m);

	dev_dbg(m->dev, "Actual Count: %08lx\n", count);

	return count;
}

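/*
 * Work out which zone to report for the last sample: the highest zone
 * flagged in the saved IRQ status, or, failing that, the highest zone with
 * a non-zero count in the zone count register.
 */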
static __always_inline
unsigned int get_zone(struct bwmon *m, enum mon_reg_type type)
{
	u32 zone_counts;
	u32 zone;

	zone = get_bitmask_order(m->intr_status);
	if (zone) {
		zone--;
	} else {
		switch (type) {
		case MON1:
			WARN(1, "Invalid\n");
			return 0;
		case MON2:
			zone_counts = readl_relaxed(MON2_ZONE_CNT(m));
			break;
		case MON3:
			zone_counts = readl_relaxed(MON3_ZONE_CNT(m));
			break;
		}

		if (zone_counts) {
			zone = get_bitmask_order(zone_counts) - 1;
			zone /= 8;
		}
	}

	m->intr_status = 0;
	return zone;
}

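/* Read the maximum count (in HW count units) recorded for the given zone. */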
static __always_inline
unsigned long get_zone_count(struct bwmon *m, unsigned int zone,
			     enum mon_reg_type type)
{
	unsigned long count;

	switch (type) {
	case MON1:
		WARN(1, "Invalid\n");
		return 0;
	case MON2:
		count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
		break;
	case MON3:
		count = readl_relaxed(MON3_ZONE_MAX(m, zone));
		if (count)
			count++;
		break;
	}

	return count;
}

static __always_inline
unsigned long mon_get_zone_stats(struct bwmon *m, enum mon_reg_type type)
{
	unsigned int zone;
	unsigned long count = 0;

	zone = get_zone(m, type);
	count = get_zone_count(m, zone, type);
	count <<= m->count_shift;

	dev_dbg(m->dev, "Zone%d Max byte count: %08lx\n", zone, count);

	return count;
}

static __always_inline
unsigned long mon_get_count(struct bwmon *m, enum mon_reg_type type)
{
	unsigned long count;

	switch (type) {
	case MON1:
		count = mon_get_count1(m);
		break;
	case MON2:
	case MON3:
		count = mon_get_zone_stats(m, type);
		break;
	}

	return count;
}

/* ********** CPUBW specific code ********** */

/*
 * Returns the number of bytes expected over a sampling window of 'ms'
 * milliseconds at the given MB/s rate, padded by tolerance_percent.
 */
static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms,
				  unsigned int tolerance_percent)
{
	mbps *= (100 + tolerance_percent) * ms;
	mbps /= 100;
	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
	mbps *= SZ_1M;
	return mbps;
}

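/*
 * Snapshot the byte count for the window that just ended: stop the monitor,
 * read the count, clear the counters and the IRQ, then re-enable it.
 */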
static __always_inline
unsigned long __get_bytes_and_clear(struct bw_hwmon *hw, enum mon_reg_type type)
{
	struct bwmon *m = to_bwmon(hw);
	unsigned long count;

	mon_disable(m, type);
	count = mon_get_count(m, type);
	mon_clear(m, false, type);
	mon_irq_clear(m, type);
	mon_enable(m, type);

	return count;
}

static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
{
	return __get_bytes_and_clear(hw, MON1);
}

static unsigned long get_bytes_and_clear2(struct bw_hwmon *hw)
{
	return __get_bytes_and_clear(hw, MON2);
}

static unsigned long get_bytes_and_clear3(struct bw_hwmon *hw)
{
	return __get_bytes_and_clear(hw, MON3);
}

static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
{
	unsigned long count;
	u32 limit;
	struct bwmon *m = to_bwmon(hw);

	mon_disable(m, MON1);
	count = mon_get_count1(m);
	mon_clear(m, false, MON1);
	mon_irq_clear(m, MON1);

	if (likely(!m->spec->wrap_on_thres))
		limit = bytes;
	else
		limit = max(bytes, 500000UL);

	mon_set_limit(m, limit);
	mon_enable(m, MON1);

	return count;
}

static unsigned long
__set_hw_events(struct bw_hwmon *hw, unsigned int sample_ms,
		enum mon_reg_type type)
{
	struct bwmon *m = to_bwmon(hw);

	mon_disable(m, type);
	mon_clear(m, false, type);
	mon_irq_clear(m, type);

	mon_set_zones(m, sample_ms, type);
	mon_enable(m, type);

	return 0;
}

static unsigned long set_hw_events(struct bw_hwmon *hw, unsigned int sample_ms)
{
	return __set_hw_events(hw, sample_ms, MON2);
}

static unsigned long
set_hw_events3(struct bw_hwmon *hw, unsigned int sample_ms)
{
	return __set_hw_events(hw, sample_ms, MON3);
}

static irqreturn_t
__bwmon_intr_handler(int irq, void *dev, enum mon_reg_type type)
{
	struct bwmon *m = dev;

	m->intr_status = mon_irq_status(m, type);
	if (!m->intr_status)
		return IRQ_NONE;

	if (bw_hwmon_sample_end(&m->hw) > 0)
		return IRQ_WAKE_THREAD;

	return IRQ_HANDLED;
}

static irqreturn_t bwmon_intr_handler(int irq, void *dev)
{
	return __bwmon_intr_handler(irq, dev, MON1);
}

static irqreturn_t bwmon_intr_handler2(int irq, void *dev)
{
	return __bwmon_intr_handler(irq, dev, MON2);
}

static irqreturn_t bwmon_intr_handler3(int irq, void *dev)
{
	return __bwmon_intr_handler(irq, dev, MON3);
}

static irqreturn_t bwmon_intr_thread(int irq, void *dev)
{
	struct bwmon *m = dev;

	update_bw_hwmon(&m->hw);
	return IRQ_HANDLED;
}

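/*
 * Apply the optional byte count match/mask filter (from the
 * qcom,byte-mid-* DT properties), if one was specified.
 */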
static __always_inline
void mon_set_byte_count_filter(struct bwmon *m, enum mon_reg_type type)
{
	if (!m->byte_mask)
		return;

	switch (type) {
	case MON1:
	case MON2:
		writel_relaxed(m->byte_mask, MON_MASK(m));
		writel_relaxed(m->byte_match, MON_MATCH(m));
		break;
	case MON3:
		writel_relaxed(m->byte_mask, MON3_MASK(m));
		writel_relaxed(m->byte_match, MON3_MATCH(m));
		break;
	}
}

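/*
 * Common start path: register the threaded IRQ handler, program either the
 * MON1 byte limit or the MON2/MON3 zone thresholds and actions, then clear
 * and enable the monitor and its interrupt.
 */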
static __always_inline int __start_bw_hwmon(struct bw_hwmon *hw,
		unsigned long mbps, enum mon_reg_type type)
{
	struct bwmon *m = to_bwmon(hw);
	u32 limit, zone_actions;
	int ret;
	irq_handler_t handler;

	switch (type) {
	case MON1:
		handler = bwmon_intr_handler;
		limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
		break;
	case MON2:
		zone_actions = calc_zone_actions();
		handler = bwmon_intr_handler2;
		break;
	case MON3:
		zone_actions = calc_zone_actions();
		handler = bwmon_intr_handler3;
		break;
	}

	ret = request_threaded_irq(m->irq, handler, bwmon_intr_thread,
				   IRQF_ONESHOT | IRQF_SHARED,
				   dev_name(m->dev), m);
	if (ret) {
		dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
			ret);
		return ret;
	}

	mon_disable(m, type);

	mon_clear(m, false, type);

	switch (type) {
	case MON1:
		mon_set_limit(m, limit);
		break;
	case MON2:
		mon_set_zones(m, hw->df->profile->polling_ms, type);
		/* Set the zone actions to increment appropriate counters */
		writel_relaxed(zone_actions, MON2_ZONE_ACTIONS(m));
		break;
	case MON3:
		mon_set_zones(m, hw->df->profile->polling_ms, type);
		/* Set the zone actions to increment appropriate counters */
		writel_relaxed(zone_actions, MON3_ZONE_ACTIONS(m));
		break;
	}

	mon_set_byte_count_filter(m, type);
	mon_irq_clear(m, type);
	mon_irq_enable(m, type);
	mon_enable(m, type);

	return 0;
}

static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
{
	return __start_bw_hwmon(hw, mbps, MON1);
}

static int start_bw_hwmon2(struct bw_hwmon *hw, unsigned long mbps)
{
	return __start_bw_hwmon(hw, mbps, MON2);
}

static int start_bw_hwmon3(struct bw_hwmon *hw, unsigned long mbps)
{
	return __start_bw_hwmon(hw, mbps, MON3);
}

static __always_inline
void __stop_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
{
	struct bwmon *m = to_bwmon(hw);

	mon_irq_disable(m, type);
	free_irq(m->irq, m);
	mon_disable(m, type);
	mon_clear(m, true, type);
	mon_irq_clear(m, type);
}

static void stop_bw_hwmon(struct bw_hwmon *hw)
{
	return __stop_bw_hwmon(hw, MON1);
}

static void stop_bw_hwmon2(struct bw_hwmon *hw)
{
	return __stop_bw_hwmon(hw, MON2);
}

static void stop_bw_hwmon3(struct bw_hwmon *hw)
{
	return __stop_bw_hwmon(hw, MON3);
}

static __always_inline
int __suspend_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
{
	struct bwmon *m = to_bwmon(hw);

	mon_irq_disable(m, type);
	free_irq(m->irq, m);
	mon_disable(m, type);
	mon_irq_clear(m, type);

	return 0;
}

static int suspend_bw_hwmon(struct bw_hwmon *hw)
{
	return __suspend_bw_hwmon(hw, MON1);
}

static int suspend_bw_hwmon2(struct bw_hwmon *hw)
{
	return __suspend_bw_hwmon(hw, MON2);
}

static int suspend_bw_hwmon3(struct bw_hwmon *hw)
{
	return __suspend_bw_hwmon(hw, MON3);
}

static __always_inline
int __resume_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
{
	struct bwmon *m = to_bwmon(hw);
	int ret;
	irq_handler_t handler;

	switch (type) {
	case MON1:
		handler = bwmon_intr_handler;
		break;
	case MON2:
		handler = bwmon_intr_handler2;
		break;
	case MON3:
		handler = bwmon_intr_handler3;
		break;
	}

	mon_clear(m, false, type);
	ret = request_threaded_irq(m->irq, handler, bwmon_intr_thread,
				   IRQF_ONESHOT | IRQF_SHARED,
				   dev_name(m->dev), m);
	if (ret) {
		dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
			ret);
		return ret;
	}

	mon_irq_enable(m, type);
	mon_enable(m, type);

	return 0;
}

static int resume_bw_hwmon(struct bw_hwmon *hw)
{
	return __resume_bw_hwmon(hw, MON1);
}

static int resume_bw_hwmon2(struct bw_hwmon *hw)
{
	return __resume_bw_hwmon(hw, MON2);
}

static int resume_bw_hwmon3(struct bw_hwmon *hw)
{
	return __resume_bw_hwmon(hw, MON3);
}

/*************************************************************************/

static const struct bwmon_spec spec[] = {
	[0] = {
		.wrap_on_thres = true,
		.overflow = false,
		.throt_adj = false,
		.hw_sampling = false,
		.has_global_base = true,
		.reg_type = MON1,
	},
	[1] = {
		.wrap_on_thres = false,
		.overflow = true,
		.throt_adj = false,
		.hw_sampling = false,
		.has_global_base = true,
		.reg_type = MON1,
	},
	[2] = {
		.wrap_on_thres = false,
		.overflow = true,
		.throt_adj = true,
		.hw_sampling = false,
		.has_global_base = true,
		.reg_type = MON1,
	},
	[3] = {
		.wrap_on_thres = false,
		.overflow = true,
		.throt_adj = true,
		.hw_sampling = true,
		.has_global_base = true,
		.reg_type = MON2,
	},
	[4] = {
		.wrap_on_thres = false,
		.overflow = true,
		.throt_adj = false,
		.hw_sampling = true,
		.reg_type = MON3,
	},
};

static const struct of_device_id bimc_bwmon_match_table[] = {
	{ .compatible = "qcom,bimc-bwmon", .data = &spec[0] },
	{ .compatible = "qcom,bimc-bwmon2", .data = &spec[1] },
	{ .compatible = "qcom,bimc-bwmon3", .data = &spec[2] },
	{ .compatible = "qcom,bimc-bwmon4", .data = &spec[3] },
	{ .compatible = "qcom,bimc-bwmon5", .data = &spec[4] },
	{}
};

static int bimc_bwmon_driver_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct bwmon *m;
	int ret;
	u32 data, count_unit;

	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
	if (!m)
		return -ENOMEM;
	m->dev = dev;

	m->spec = of_device_get_match_data(dev);
	if (!m->spec) {
		dev_err(dev, "Unknown device type!\n");
		return -ENODEV;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res) {
		dev_err(dev, "base not found!\n");
		return -EINVAL;
	}
	m->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!m->base) {
		dev_err(dev, "Unable to map base!\n");
		return -ENOMEM;
	}

	if (m->spec->has_global_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "global_base");
		if (!res) {
			dev_err(dev, "global_base not found!\n");
			return -EINVAL;
		}
		m->global_base = devm_ioremap(dev, res->start,
					      resource_size(res));
		if (!m->global_base) {
			dev_err(dev, "Unable to map global_base!\n");
			return -ENOMEM;
		}

		ret = of_property_read_u32(dev->of_node, "qcom,mport", &data);
		if (ret) {
			dev_err(dev, "mport not found!\n");
			return ret;
		}
		m->mport = data;
	}

	m->irq = platform_get_irq(pdev, 0);
	if (m->irq < 0) {
		dev_err(dev, "Unable to get IRQ number\n");
		return m->irq;
	}

	m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
	if (!m->hw.of_node)
		return -EINVAL;

	if (m->spec->hw_sampling) {
		ret = of_property_read_u32(dev->of_node, "qcom,hw-timer-hz",
					   &m->hw_timer_hz);
		if (ret) {
			dev_err(dev, "HW sampling rate not specified!\n");
			return ret;
		}
	}

	if (of_property_read_u32(dev->of_node, "qcom,count-unit", &count_unit))
		count_unit = SZ_1M;
	m->count_shift = order_base_2(count_unit);
	m->thres_lim = THRES_LIM(m->count_shift);

	switch (m->spec->reg_type) {
	case MON3:
		m->hw.start_hwmon = start_bw_hwmon3;
		m->hw.stop_hwmon = stop_bw_hwmon3;
		m->hw.suspend_hwmon = suspend_bw_hwmon3;
		m->hw.resume_hwmon = resume_bw_hwmon3;
		m->hw.get_bytes_and_clear = get_bytes_and_clear3;
		m->hw.set_hw_events = set_hw_events3;
		break;
	case MON2:
		m->hw.start_hwmon = start_bw_hwmon2;
		m->hw.stop_hwmon = stop_bw_hwmon2;
		m->hw.suspend_hwmon = suspend_bw_hwmon2;
		m->hw.resume_hwmon = resume_bw_hwmon2;
		m->hw.get_bytes_and_clear = get_bytes_and_clear2;
		m->hw.set_hw_events = set_hw_events;
		break;
	case MON1:
		m->hw.start_hwmon = start_bw_hwmon;
		m->hw.stop_hwmon = stop_bw_hwmon;
		m->hw.suspend_hwmon = suspend_bw_hwmon;
		m->hw.resume_hwmon = resume_bw_hwmon;
		m->hw.get_bytes_and_clear = get_bytes_and_clear;
		m->hw.set_thres = set_thres;
		break;
	}

	of_property_read_u32(dev->of_node, "qcom,byte-mid-match",
			     &m->byte_match);
	of_property_read_u32(dev->of_node, "qcom,byte-mid-mask",
			     &m->byte_mask);

	if (m->spec->throt_adj) {
		m->hw.set_throttle_adj = mon_set_throttle_adj;
		m->hw.get_throttle_adj = mon_get_throttle_adj;
	}

	ret = register_bw_hwmon(dev, &m->hw);
	if (ret) {
		dev_err(dev, "Dev BW hwmon registration failed\n");
		return ret;
	}

	return 0;
}

static struct platform_driver bimc_bwmon_driver = {
	.probe = bimc_bwmon_driver_probe,
	.driver = {
		.name = "bimc-bwmon",
		.of_match_table = bimc_bwmon_match_table,
		.suppress_bind_attrs = true,
	},
};

module_platform_driver(bimc_bwmon_driver);
MODULE_DESCRIPTION("BIMC bandwidth monitor driver");
MODULE_LICENSE("GPL v2");