blob: d0be577e0c7f6c71cef005b8d7999dd1270f48bc [file] [log] [blame]
Saravana Kannanedad3012013-09-23 19:27:57 -07001/*
Maria Yu4481a7d2018-03-02 16:23:32 +08002 * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
Saravana Kannanedad3012013-09-23 19:27:57 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#define pr_fmt(fmt) "bw-hwmon: " fmt
15
16#include <linux/kernel.h>
17#include <linux/sizes.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/delay.h>
22#include <linux/ktime.h>
23#include <linux/time.h>
24#include <linux/err.h>
25#include <linux/errno.h>
26#include <linux/mutex.h>
27#include <linux/interrupt.h>
Saravana Kannanb93a2752015-06-11 16:04:23 -070028#include <linux/spinlock.h>
Saravana Kannanedad3012013-09-23 19:27:57 -070029#include <linux/platform_device.h>
30#include <linux/of.h>
31#include <linux/devfreq.h>
32#include <trace/events/power.h>
33#include "governor.h"
34#include "governor_bw_hwmon.h"
35
/* Maximum number of entries accepted in the mbps_zones tunable list. */
#define NUM_MBPS_ZONES		10

/*
 * Per-device governor instance: sysfs tunables plus the bookkeeping used
 * by the bandwidth measurement and voting logic. Fields written from the
 * sample-end IRQ path are protected by irq_lock; mon_started by mon_lock.
 */
struct hwmon_node {
	/* Tunables, exposed via the gov_attr()/gov_list_attr() macros. */
	unsigned int guard_band_mbps;	/* extra AB margin added to votes */
	unsigned int decay_rate;	/* % weight of new vs previous AB */
	unsigned int io_percent;	/* assumed % of time doing IO */
	unsigned int bw_step;		/* rounding granularity for AB votes */
	unsigned int sample_ms;		/* micro sample window length */
	unsigned int up_scale;		/* % extra over-request on UP_WAKE */
	unsigned int up_thres;		/* % growth that arms the up IRQ */
	unsigned int down_thres;	/* % of measured mbps for down IRQ */
	unsigned int down_count;	/* low samples needed for DOWN_WAKE */
	unsigned int hist_memory;	/* windows to remember the peak for */
	unsigned int hyst_trigger_count;
	unsigned int hyst_length;
	unsigned int idle_mbps;
	unsigned int mbps_zones[NUM_MBPS_ZONES];	/* 0-terminated list */

	/* Internal state. */
	unsigned long prev_ab;		/* last AB value computed */
	unsigned long *dev_ab;		/* device AB vote; NULL when the
					 * device takes no AB votes */
	unsigned long resume_freq;	/* saved freq while suspended; 0 when
					 * not suspended */
	unsigned long resume_ab;	/* saved AB vote while suspended */
	unsigned long bytes;		/* residual bytes at threshold IRQ */
	unsigned long max_mbps;		/* peak mbps in current window */
	unsigned long hist_max_mbps;	/* historic peak across windows */
	unsigned long hist_mem;		/* windows left before peak expires */
	unsigned long hyst_peak;	/* samples seen near the peak */
	unsigned long hyst_mbps;	/* peak tracked for hysteresis */
	unsigned long hyst_trig_win;	/* windows left in trigger period */
	unsigned long hyst_en;		/* windows hysteresis stays active */
	unsigned long prev_req;		/* previously requested mbps */
	unsigned int wake;		/* 0, UP_WAKE or DOWN_WAKE */
	unsigned int down_cnt;		/* SW countdown towards DOWN_WAKE */
	ktime_t prev_ts;		/* start of current sample window */
	ktime_t hist_max_ts;
	bool sampled;			/* a micro sample ended this window */
	bool mon_started;		/* monitor running; see mon_lock */
	struct list_head list;		/* entry in hwmon_list */
	void *orig_data;		/* df->data saved across gov start */
	struct bw_hwmon *hw;		/* HW-specific monitor ops/state */
	struct devfreq_governor *gov;	/* governor used by this node */
	struct attribute_group *attr_grp;
	struct mutex mon_lock;		/* serializes start/stop vs update */
};
79
/* Wake decisions produced by the sample-end handlers (hwmon_node->wake). */
#define UP_WAKE 1
#define DOWN_WAKE 2
/* Serializes IRQ-context sample handling with get_bw_and_set_irq(). */
static DEFINE_SPINLOCK(irq_lock);

/* All registered hwmon_node instances; protected by list_lock. */
static LIST_HEAD(hwmon_list);
static DEFINE_MUTEX(list_lock);

/* Users of the common bw_hwmon governor; protected by state_lock. */
static int use_cnt;
static DEFINE_MUTEX(state_lock);
89
/*
 * Boilerplate generators for the simple unsigned-int sysfs tunables.
 * store_attr() clamps the written value to [_min, _max].
 */
#define show_attr(name) \
static ssize_t show_##name(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name); \
}

#define store_attr(name, _min, _max) \
static ssize_t store_##name(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	int ret; \
	unsigned int val; \
	/* Parse as unsigned to match the field type and the U-suffixed \
	 * min/max arguments (kstrtoint with an unsigned int * was a type \
	 * mismatch and parsed the value as signed). */ \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) \
		return ret; \
	val = max(val, _min); \
	val = min(val, _max); \
	hw->name = val; \
	return count; \
}

#define gov_attr(__attr, min, max) \
show_attr(__attr) \
store_attr(__attr, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
121
/*
 * Boilerplate generators for list-valued sysfs tunables (arrays of up to
 * n unsigned ints, zero-terminated). Writes parse a space-separated list,
 * clamp each value to [_min, _max] and terminate the array.
 */
#define show_list_attr(name, n) \
static ssize_t show_list_##name(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	unsigned int i, cnt = 0; \
	\
	/* scnprintf with the remaining space: the original passed \
	 * PAGE_SIZE as the bound for writes at buf + cnt, which could \
	 * run past the sysfs page. */ \
	for (i = 0; i < n && hw->name[i]; i++) \
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u ", \
				 hw->name[i]); \
	cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "\n"); \
	return cnt; \
}

#define store_list_attr(name, n, _min, _max) \
static ssize_t store_list_##name(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	int ret, numvals; \
	unsigned int i = 0, val; \
	char **strlist; \
	\
	strlist = argv_split(GFP_KERNEL, buf, &numvals); \
	if (!strlist) \
		return -ENOMEM; \
	/* Keep one slot free for the zero terminator. */ \
	numvals = min(numvals, n - 1); \
	for (i = 0; i < numvals; i++) { \
		ret = kstrtouint(strlist[i], 10, &val); \
		if (ret) \
			goto out; \
		val = max(val, _min); \
		val = min(val, _max); \
		hw->name[i] = val; \
	} \
	ret = count; \
out: \
	argv_free(strlist); \
	/* On parse failure the list is truncated at the bad entry. */ \
	hw->name[i] = 0; \
	return ret; \
}

#define gov_list_attr(__attr, n, min, max) \
show_list_attr(__attr, n) \
store_list_attr(__attr, n, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_list_##__attr, store_list_##__attr)
170
/* Bounds applied to the devfreq polling interval. */
#define MIN_MS	10U
#define MAX_MS	500U

/* Returns MBps of read/writes for the sampling window. */
static unsigned long bytes_to_mbps(unsigned long long bytes, unsigned int us)
{
	/* do_div() divides in place; scale to bytes/sec first. */
	bytes *= USEC_PER_SEC;
	do_div(bytes, us);
	bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
	return bytes;
}

/* Converts a bandwidth (MBps) sustained for @ms milliseconds into bytes. */
static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms)
{
	mbps *= ms;
	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
	mbps *= SZ_1M;
	return mbps;
}
190
/*
 * End a SW-managed micro sample window: read and clear the HW byte
 * counter, convert it to MBps over the elapsed wall-clock time, and
 * decide whether the governor must be woken up to re-vote.
 *
 * Returns 0, UP_WAKE or DOWN_WAKE (also cached in node->wake).
 * Called with irq_lock held.
 */
static int __bw_hwmon_sw_sample_end(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	ktime_t ts;
	unsigned long bytes, mbps;
	unsigned int us;
	int wake = 0;

	df = hwmon->df;
	node = df->data;

	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, node->prev_ts));

	/* Include bytes carried over from an early threshold crossing. */
	bytes = hwmon->get_bytes_and_clear(hwmon);
	bytes += node->bytes;
	node->bytes = 0;

	mbps = bytes_to_mbps(bytes, us);
	node->max_mbps = max(node->max_mbps, mbps);

	/*
	 * If the measured bandwidth in a micro sample is greater than the
	 * wake up threshold, it indicates an increase in load that's non
	 * trivial. So, have the governor ignore historical idle time or low
	 * bandwidth usage and do the bandwidth calculation based on just
	 * this micro sample.
	 */
	if (mbps > node->hw->up_wake_mbps) {
		wake = UP_WAKE;
	} else if (mbps < node->hw->down_wake_mbps) {
		/* Need down_count consecutive low samples for DOWN_WAKE. */
		if (node->down_cnt)
			node->down_cnt--;
		if (node->down_cnt <= 0)
			wake = DOWN_WAKE;
	}

	node->prev_ts = ts;
	node->wake = wake;
	node->sampled = true;

	trace_bw_hwmon_meas(dev_name(df->dev.parent),
				mbps,
				us,
				wake);

	return wake;
}
240
/*
 * End a HW-managed sample window (hardware that raises its own up/down
 * events via set_hw_events). The down-count is programmed into hw->down_cnt
 * by get_bw_and_set_irq() rather than counted here, and the return value
 * is always 1 to report a completed sample.
 *
 * Called with irq_lock held.
 */
static int __bw_hwmon_hw_sample_end(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	unsigned long bytes, mbps;
	int wake = 0;

	df = hwmon->df;
	node = df->data;

	/*
	 * If this read is in response to an IRQ, the HW monitor should
	 * return the measurement in the micro sample that triggered the IRQ.
	 * Otherwise, it should return the maximum measured value in any
	 * micro sample since the last time we called get_bytes_and_clear()
	 */
	bytes = hwmon->get_bytes_and_clear(hwmon);
	mbps = bytes_to_mbps(bytes, node->sample_ms * USEC_PER_MSEC);
	/* Overwrite (not max): the HW already reports the window maximum. */
	node->max_mbps = mbps;

	if (mbps > node->hw->up_wake_mbps)
		wake = UP_WAKE;
	else if (mbps < node->hw->down_wake_mbps)
		wake = DOWN_WAKE;

	node->wake = wake;
	node->sampled = true;

	trace_bw_hwmon_meas(dev_name(df->dev.parent),
				mbps,
				node->sample_ms * USEC_PER_MSEC,
				wake);

	return 1;
}
276
277static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon)
278{
279 if (hwmon->set_hw_events)
280 return __bw_hwmon_hw_sample_end(hwmon);
281 else
282 return __bw_hwmon_sw_sample_end(hwmon);
283}
284
Saravana Kannanb93a2752015-06-11 16:04:23 -0700285int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
286{
287 unsigned long flags;
288 int wake;
289
290 spin_lock_irqsave(&irq_lock, flags);
291 wake = __bw_hwmon_sample_end(hwmon);
292 spin_unlock_irqrestore(&irq_lock, flags);
293
294 return wake;
295}
296
297unsigned long to_mbps_zone(struct hwmon_node *node, unsigned long mbps)
298{
299 int i;
300
301 for (i = 0; i < NUM_MBPS_ZONES && node->mbps_zones[i]; i++)
302 if (node->mbps_zones[i] >= mbps)
303 return node->mbps_zones[i];
304
305 return node->hw->df->max_freq;
306}
307
/* Floor below which traffic is treated as "too low" for normal tracking. */
#define MIN_MBPS	500UL
/* Percent tolerance used for both history and hysteresis peak matching. */
#define HIST_PEAK_TOL	60
/*
 * Core decision function: finalize the current sample, update peak
 * history and hysteresis state, compute the next bandwidth request, and
 * re-arm the HW monitor's wake thresholds for the next window.
 *
 * Outputs the target frequency in *freq and (if @ab is non-NULL) the
 * rounded AB vote in *ab; returns the requested mbps.
 */
static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
					unsigned long *freq, unsigned long *ab)
{
	unsigned long meas_mbps, thres, flags, req_mbps, adj_mbps;
	unsigned long meas_mbps_zone;
	unsigned long hist_lo_tol, hyst_lo_tol;
	struct bw_hwmon *hw = node->hw;
	unsigned int new_bw, io_percent = node->io_percent;
	ktime_t ts;
	unsigned int ms = 0;

	spin_lock_irqsave(&irq_lock, flags);

	if (!hw->set_hw_events) {
		ts = ktime_get();
		ms = ktime_to_ms(ktime_sub(ts, node->prev_ts));
	}
	/* Close the window now unless an IRQ already sampled it recently. */
	if (!node->sampled || ms >= node->sample_ms)
		__bw_hwmon_sample_end(node->hw);
	node->sampled = false;

	req_mbps = meas_mbps = node->max_mbps;
	node->max_mbps = 0;

	hist_lo_tol = (node->hist_max_mbps * HIST_PEAK_TOL) / 100;
	/* Remember historic peak in the past hist_mem decision windows. */
	if (meas_mbps > node->hist_max_mbps || !node->hist_mem) {
		/* If new max or no history */
		node->hist_max_mbps = meas_mbps;
		node->hist_mem = node->hist_memory;
	} else if (meas_mbps >= hist_lo_tol) {
		/*
		 * If subsequent peaks come close (within tolerance) to but
		 * less than the historic peak, then reset the history start,
		 * but not the peak value.
		 */
		node->hist_mem = node->hist_memory;
	} else {
		/* Count down history expiration. */
		if (node->hist_mem)
			node->hist_mem--;
	}

	/*
	 * The AB value that corresponds to the lowest mbps zone greater than
	 * or equal to the "frequency" the current measurement will pick.
	 * This upper limit is useful for balancing out any prediction
	 * mechanisms to be power friendly.
	 */
	meas_mbps_zone = (meas_mbps * 100) / io_percent;
	meas_mbps_zone = to_mbps_zone(node, meas_mbps_zone);
	meas_mbps_zone = (meas_mbps_zone * io_percent) / 100;
	meas_mbps_zone = max(meas_mbps, meas_mbps_zone);

	/*
	 * If this is a wake up due to BW increase, vote much higher BW than
	 * what we measure to stay ahead of increasing traffic and then set
	 * it up to vote for measured BW if we see down_count short sample
	 * windows of low traffic.
	 */
	if (node->wake == UP_WAKE) {
		req_mbps += ((meas_mbps - node->prev_req)
				* node->up_scale) / 100;
		/*
		 * However if the measured load is less than the historic
		 * peak, but the over request is higher than the historic
		 * peak, then we could limit the over requesting to the
		 * historic peak.
		 */
		if (req_mbps > node->hist_max_mbps
		    && meas_mbps < node->hist_max_mbps)
			req_mbps = node->hist_max_mbps;

		req_mbps = min(req_mbps, meas_mbps_zone);
	}

	hyst_lo_tol = (node->hyst_mbps * HIST_PEAK_TOL) / 100;
	/* A new, non-trivial peak restarts the hysteresis trigger window. */
	if (meas_mbps > node->hyst_mbps && meas_mbps > MIN_MBPS) {
		hyst_lo_tol = (meas_mbps * HIST_PEAK_TOL) / 100;
		node->hyst_peak = 0;
		node->hyst_trig_win = node->hyst_length;
		node->hyst_mbps = meas_mbps;
	}

	/*
	 * Check node->max_mbps to avoid double counting peaks that cause
	 * early termination of a window.
	 */
	if (meas_mbps >= hyst_lo_tol && meas_mbps > MIN_MBPS
	    && !node->max_mbps) {
		node->hyst_peak++;
		if (node->hyst_peak >= node->hyst_trigger_count
		    || node->hyst_en)
			node->hyst_en = node->hyst_length;
	}

	if (node->hyst_trig_win)
		node->hyst_trig_win--;
	if (node->hyst_en)
		node->hyst_en--;

	/* Both counters expired: forget the hysteresis peak entirely. */
	if (!node->hyst_trig_win && !node->hyst_en) {
		node->hyst_peak = 0;
		node->hyst_mbps = 0;
	}

	/* While hysteresis is active, don't vote below the tracked peak. */
	if (node->hyst_en) {
		if (meas_mbps > node->idle_mbps)
			req_mbps = max(req_mbps, node->hyst_mbps);
	}

	/* Stretch the short sample window size, if the traffic is too low */
	if (meas_mbps < MIN_MBPS) {
		hw->up_wake_mbps = (max(MIN_MBPS, req_mbps)
					* (100 + node->up_thres)) / 100;
		hw->down_wake_mbps = 0;
		hw->undo_over_req_mbps = 0;
		thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2),
					node->sample_ms);
	} else {
		/*
		 * Up wake vs down wake are intentionally a percentage of
		 * req_mbps vs meas_mbps to make sure the over requesting
		 * phase is handled properly. We only want to wake up and
		 * reduce the vote based on the measured mbps being less than
		 * the previous measurement that caused the "over request".
		 */
		hw->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100;
		hw->down_wake_mbps = (meas_mbps * node->down_thres) / 100;
		if (node->wake == UP_WAKE)
			hw->undo_over_req_mbps = min(req_mbps, meas_mbps_zone);
		else
			hw->undo_over_req_mbps = 0;
		thres = mbps_to_bytes(meas_mbps, node->sample_ms);
	}

	/* Re-arm the monitor: HW event thresholds or a SW byte threshold. */
	if (hw->set_hw_events) {
		hw->down_cnt = node->down_count;
		hw->set_hw_events(hw, node->sample_ms);
	} else {
		node->down_cnt = node->down_count;
		node->bytes = hw->set_thres(hw, thres);
	}

	node->wake = 0;
	node->prev_req = req_mbps;

	spin_unlock_irqrestore(&irq_lock, flags);

	adj_mbps = req_mbps + node->guard_band_mbps;

	/* Rise immediately; decay slowly per decay_rate when falling. */
	if (adj_mbps > node->prev_ab) {
		new_bw = adj_mbps;
	} else {
		new_bw = adj_mbps * node->decay_rate
			+ node->prev_ab * (100 - node->decay_rate);
		new_bw /= 100;
	}

	node->prev_ab = new_bw;
	if (ab)
		*ab = roundup(new_bw, node->bw_step);

	*freq = (new_bw * 100) / io_percent;
	trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
				new_bw,
				*freq,
				hw->up_wake_mbps,
				hw->down_wake_mbps);
	return req_mbps;
}
481
482static struct hwmon_node *find_hwmon_node(struct devfreq *df)
483{
484 struct hwmon_node *node, *found = NULL;
485
486 mutex_lock(&list_lock);
487 list_for_each_entry(node, &hwmon_list, list)
488 if (node->hw->dev == df->dev.parent ||
489 node->hw->of_node == df->dev.parent->of_node ||
490 (!node->hw->dev && !node->hw->of_node &&
491 node->gov == df->governor)) {
492 found = node;
493 break;
494 }
495 mutex_unlock(&list_lock);
496
497 return found;
498}
499
/*
 * Called by HW monitor drivers to force an immediate re-evaluation of the
 * frequency/AB vote (restarting the devfreq polling timer around it).
 *
 * Returns 0 on success, -EINVAL/-ENODEV for a partially initialized
 * monitor, or -EBUSY while the monitor is stopped or suspended.
 */
int update_bw_hwmon(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	int ret;

	if (!hwmon)
		return -EINVAL;
	df = hwmon->df;
	if (!df)
		return -ENODEV;
	node = df->data;
	if (!node)
		return -ENODEV;

	/* mon_lock keeps stop_monitor() out while we restart polling. */
	mutex_lock(&node->mon_lock);
	if (!node->mon_started) {
		mutex_unlock(&node->mon_lock);
		return -EBUSY;
	}

	dev_dbg(df->dev.parent, "Got update request\n");
	devfreq_monitor_stop(df);

	mutex_lock(&df->lock);
	ret = update_devfreq(df);
	if (ret)
		dev_err(df->dev.parent,
			"Unable to update freq on request!\n");
	mutex_unlock(&df->lock);

	devfreq_monitor_start(df);

	mutex_unlock(&node->mon_lock);
	return 0;
}
536
/*
 * Start (@init == true) or resume (@init == false) the HW monitor and the
 * devfreq polling. On init, the wake thresholds are seeded from the
 * current devfreq frequency so the first window has sane trigger points.
 *
 * Returns 0 on success or the error from the HW start/resume callback.
 */
static int start_monitor(struct devfreq *df, bool init)
{
	struct hwmon_node *node = df->data;
	struct bw_hwmon *hw = node->hw;
	struct device *dev = df->dev.parent;
	unsigned long mbps;
	int ret;

	node->prev_ts = ktime_get();

	if (init) {
		node->prev_ab = 0;
		node->resume_freq = 0;
		node->resume_ab = 0;
		/* Estimate current BW from the frequency and io_percent. */
		mbps = (df->previous_freq * node->io_percent) / 100;
		hw->up_wake_mbps = mbps;
		hw->down_wake_mbps = MIN_MBPS;
		hw->undo_over_req_mbps = 0;
		ret = hw->start_hwmon(hw, mbps);
	} else {
		ret = hw->resume_hwmon(hw);
	}

	if (ret) {
		dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
		return ret;
	}

	if (init)
		devfreq_monitor_start(df);
	else
		devfreq_monitor_resume(df);

	node->mon_started = true;

	return 0;
}
574
575static void stop_monitor(struct devfreq *df, bool init)
576{
577 struct hwmon_node *node = df->data;
578 struct bw_hwmon *hw = node->hw;
579
Santosh Mardi5b3e7a52018-10-25 15:39:58 +0530580 mutex_lock(&node->mon_lock);
Saravana Kannancddae1b2014-08-07 19:38:02 -0700581 node->mon_started = false;
Santosh Mardi5b3e7a52018-10-25 15:39:58 +0530582 mutex_unlock(&node->mon_lock);
Saravana Kannancddae1b2014-08-07 19:38:02 -0700583
584 if (init) {
585 devfreq_monitor_stop(df);
586 hw->stop_hwmon(hw);
587 } else {
588 devfreq_monitor_suspend(df);
589 hw->suspend_hwmon(hw);
590 }
591
592}
593
594static int gov_start(struct devfreq *df)
Saravana Kannanedad3012013-09-23 19:27:57 -0700595{
596 int ret = 0;
Saravana Kannanedad3012013-09-23 19:27:57 -0700597 struct device *dev = df->dev.parent;
598 struct hwmon_node *node;
599 struct bw_hwmon *hw;
600 struct devfreq_dev_status stat;
601
602 node = find_hwmon_node(df);
603 if (!node) {
604 dev_err(dev, "Unable to find HW monitor!\n");
605 return -ENODEV;
606 }
607 hw = node->hw;
608
609 stat.private_data = NULL;
610 if (df->profile->get_dev_status)
611 ret = df->profile->get_dev_status(df->dev.parent, &stat);
612 if (ret || !stat.private_data)
613 dev_warn(dev, "Device doesn't take AB votes!\n");
614 else
615 node->dev_ab = stat.private_data;
616
617 hw->df = df;
618 node->orig_data = df->data;
619 df->data = node;
620
Saravana Kannancddae1b2014-08-07 19:38:02 -0700621 if (start_monitor(df, true))
Saravana Kannanedad3012013-09-23 19:27:57 -0700622 goto err_start;
Saravana Kannanedad3012013-09-23 19:27:57 -0700623
624 ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
625 if (ret)
626 goto err_sysfs;
627
628 return 0;
629
630err_sysfs:
Saravana Kannancddae1b2014-08-07 19:38:02 -0700631 stop_monitor(df, true);
Saravana Kannanedad3012013-09-23 19:27:57 -0700632err_start:
633 df->data = node->orig_data;
634 node->orig_data = NULL;
635 hw->df = NULL;
636 node->dev_ab = NULL;
637 return ret;
638}
639
/* DEVFREQ_GOV_STOP handler: undo everything gov_start() set up. */
static void gov_stop(struct devfreq *df)
{
	struct hwmon_node *node = df->data;
	struct bw_hwmon *hw = node->hw;

	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
	stop_monitor(df, true);
	df->data = node->orig_data;
	node->orig_data = NULL;
	hw->df = NULL;
	/*
	 * Not all governors know about this additional extended device
	 * configuration. To avoid leaving the extended configuration at a
	 * stale state, set it to 0 and let the next governor take it from
	 * there.
	 */
	if (node->dev_ab)
		*node->dev_ab = 0;
	node->dev_ab = NULL;
}
660
Saravana Kannancddae1b2014-08-07 19:38:02 -0700661static int gov_suspend(struct devfreq *df)
662{
663 struct hwmon_node *node = df->data;
664 unsigned long resume_freq = df->previous_freq;
665 unsigned long resume_ab = *node->dev_ab;
666
667 if (!node->hw->suspend_hwmon)
668 return -ENOSYS;
669
670 if (node->resume_freq) {
671 dev_warn(df->dev.parent, "Governor already suspended!\n");
672 return -EBUSY;
673 }
674
675 stop_monitor(df, false);
676
677 mutex_lock(&df->lock);
678 update_devfreq(df);
679 mutex_unlock(&df->lock);
680
681 node->resume_freq = resume_freq;
682 node->resume_ab = resume_ab;
683
684 return 0;
685}
686
/*
 * DEVFREQ_GOV_RESUME handler: restore the saved frequency/AB vote via
 * update_devfreq() (get_freq still sees mon_started == false and returns
 * the resume values), then restart the HW monitor.
 *
 * Returns -ENOSYS if the HW monitor can't be resumed and -EBUSY if the
 * governor was never suspended.
 */
static int gov_resume(struct devfreq *df)
{
	struct hwmon_node *node = df->data;

	if (!node->hw->resume_hwmon)
		return -ENOSYS;

	if (!node->resume_freq) {
		dev_warn(df->dev.parent, "Governor already resumed!\n");
		return -EBUSY;
	}

	mutex_lock(&df->lock);
	update_devfreq(df);
	mutex_unlock(&df->lock);

	node->resume_freq = 0;
	node->resume_ab = 0;

	return start_monitor(df, false);
}
708
Saravana Kannanedad3012013-09-23 19:27:57 -0700709static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
710 unsigned long *freq)
711{
Saravana Kannanedad3012013-09-23 19:27:57 -0700712 struct hwmon_node *node = df->data;
713
Saravana Kannancddae1b2014-08-07 19:38:02 -0700714 /* Suspend/resume sequence */
715 if (!node->mon_started) {
716 *freq = node->resume_freq;
717 *node->dev_ab = node->resume_ab;
718 return 0;
719 }
720
Saravana Kannanb93a2752015-06-11 16:04:23 -0700721 get_bw_and_set_irq(node, freq, node->dev_ab);
Saravana Kannanedad3012013-09-23 19:27:57 -0700722
723 return 0;
724}
725
Rohit Gupta4d1f4f42015-05-08 12:04:56 -0700726static ssize_t store_throttle_adj(struct device *dev,
727 struct device_attribute *attr, const char *buf, size_t count)
728{
729 struct devfreq *df = to_devfreq(dev);
730 struct hwmon_node *node = df->data;
731 int ret;
732 unsigned int val;
733
734 if (!node->hw->set_throttle_adj)
735 return -ENOSYS;
736
737 ret = kstrtouint(buf, 10, &val);
738 if (ret)
739 return ret;
740
741 ret = node->hw->set_throttle_adj(node->hw, val);
742
743 if (!ret)
744 return count;
745 else
746 return ret;
747}
748
749static ssize_t show_throttle_adj(struct device *dev,
750 struct device_attribute *attr, char *buf)
751{
752 struct devfreq *df = to_devfreq(dev);
753 struct hwmon_node *node = df->data;
754 unsigned int val;
755
756 if (!node->hw->get_throttle_adj)
757 val = 0;
758 else
759 val = node->hw->get_throttle_adj(node->hw);
760
761 return snprintf(buf, PAGE_SIZE, "%u\n", val);
762}
763
764static DEVICE_ATTR(throttle_adj, 0644, show_throttle_adj,
765 store_throttle_adj);
766
/* Tunable instantiations: attribute name, minimum, maximum (clamped). */
gov_attr(guard_band_mbps, 0U, 2000U);
gov_attr(decay_rate, 0U, 100U);
gov_attr(io_percent, 1U, 100U);
gov_attr(bw_step, 50U, 1000U);
gov_attr(sample_ms, 1U, 50U);
gov_attr(up_scale, 0U, 500U);
gov_attr(up_thres, 1U, 100U);
gov_attr(down_thres, 0U, 90U);
gov_attr(down_count, 0U, 90U);
gov_attr(hist_memory, 0U, 90U);
gov_attr(hyst_trigger_count, 0U, 90U);
gov_attr(hyst_length, 0U, 90U);
gov_attr(idle_mbps, 0U, 2000U);
gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);
Saravana Kannanedad3012013-09-23 19:27:57 -0700781
/* Attributes exported under the governor's sysfs directory. */
static struct attribute *dev_attr[] = {
	&dev_attr_guard_band_mbps.attr,
	&dev_attr_decay_rate.attr,
	&dev_attr_io_percent.attr,
	&dev_attr_bw_step.attr,
	&dev_attr_sample_ms.attr,
	&dev_attr_up_scale.attr,
	&dev_attr_up_thres.attr,
	&dev_attr_down_thres.attr,
	&dev_attr_down_count.attr,
	&dev_attr_hist_memory.attr,
	&dev_attr_hyst_trigger_count.attr,
	&dev_attr_hyst_length.attr,
	&dev_attr_idle_mbps.attr,
	&dev_attr_mbps_zones.attr,
	&dev_attr_throttle_adj.attr,
	NULL,
};

/* Default group used with the common "bw_hwmon" governor. */
static struct attribute_group dev_attr_group = {
	.name = "bw_hwmon",
	.attrs = dev_attr,
};
805
/*
 * devfreq governor event callback: handles start/stop, polling interval
 * changes, and suspend/resume. All events are serialized by state_lock.
 */
static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
					unsigned int event, void *data)
{
	int ret = 0;
	unsigned int sample_ms;
	struct hwmon_node *node;
	struct bw_hwmon *hw;

	mutex_lock(&state_lock);

	switch (event) {
	case DEVFREQ_GOV_START:
		/* Clamp the polling interval to [MIN_MS, MAX_MS]. */
		sample_ms = df->profile->polling_ms;
		sample_ms = max(MIN_MS, sample_ms);
		sample_ms = min(MAX_MS, sample_ms);
		df->profile->polling_ms = sample_ms;

		ret = gov_start(df);
		if (ret)
			goto out;

		dev_dbg(df->dev.parent,
			"Enabled dev BW HW monitor governor\n");
		break;

	case DEVFREQ_GOV_STOP:
		gov_stop(df);
		dev_dbg(df->dev.parent,
			"Disabled dev BW HW monitor governor\n");
		break;

	case DEVFREQ_GOV_INTERVAL:
		sample_ms = *(unsigned int *)data;
		sample_ms = max(MIN_MS, sample_ms);
		sample_ms = min(MAX_MS, sample_ms);
		/*
		 * Suspend/resume the HW monitor around the interval update
		 * to prevent the HW monitor IRQ from trying to change
		 * stop/start the delayed workqueue while the interval update
		 * is happening.
		 */
		node = df->data;
		hw = node->hw;
		/*
		 * NOTE(review): suspend_hwmon/resume_hwmon are called
		 * unconditionally here but are NULL-checked in
		 * gov_suspend()/gov_resume() — confirm every backend
		 * implements both callbacks.
		 */
		hw->suspend_hwmon(hw);
		devfreq_interval_update(df, &sample_ms);
		ret = hw->resume_hwmon(hw);
		if (ret) {
			dev_err(df->dev.parent,
				"Unable to resume HW monitor (%d)\n", ret);
			goto out;
		}
		break;

	case DEVFREQ_GOV_SUSPEND:
		ret = gov_suspend(df);
		if (ret) {
			dev_err(df->dev.parent,
				"Unable to suspend BW HW mon governor (%d)\n",
				ret);
			goto out;
		}

		dev_dbg(df->dev.parent, "Suspended BW HW mon governor\n");
		break;

	case DEVFREQ_GOV_RESUME:
		ret = gov_resume(df);
		if (ret) {
			dev_err(df->dev.parent,
				"Unable to resume BW HW mon governor (%d)\n",
				ret);
			goto out;
		}

		dev_dbg(df->dev.parent, "Resumed BW HW mon governor\n");
		break;
	}

out:
	mutex_unlock(&state_lock);

	return ret;
}
889
/* Shared governor used when a driver doesn't register its own. */
static struct devfreq_governor devfreq_gov_bw_hwmon = {
	.name = "bw_hwmon",
	.get_target_freq = devfreq_bw_hwmon_get_freq,
	.event_handler = devfreq_bw_hwmon_ev_handler,
};
895
/*
 * Register a HW bandwidth monitor. The caller either supplies its own
 * governor (hwmon->gov, which gets this file's handlers plugged in) or
 * identifies the target device via hwmon->dev / hwmon->of_node and shares
 * the common "bw_hwmon" governor.
 *
 * Returns 0 on success, -EINVAL for an unidentifiable monitor, -ENOMEM on
 * allocation failure, or the error from devfreq_add_governor().
 */
int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
{
	int ret = 0;
	struct hwmon_node *node;
	struct attribute_group *attr_grp;

	if (!hwmon->gov && !hwmon->dev && !hwmon->of_node)
		return -EINVAL;

	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	if (hwmon->gov) {
		/* Dedicated governor: same handlers, custom group name. */
		attr_grp = devm_kzalloc(dev, sizeof(*attr_grp), GFP_KERNEL);
		if (!attr_grp)
			return -ENOMEM;

		hwmon->gov->get_target_freq = devfreq_bw_hwmon_get_freq;
		hwmon->gov->event_handler = devfreq_bw_hwmon_ev_handler;
		attr_grp->name = hwmon->gov->name;
		attr_grp->attrs = dev_attr;

		node->gov = hwmon->gov;
		node->attr_grp = attr_grp;
	} else {
		node->gov = &devfreq_gov_bw_hwmon;
		node->attr_grp = &dev_attr_group;
	}

	/* Default tunable values (overridable from sysfs). */
	node->guard_band_mbps = 100;
	node->decay_rate = 90;
	node->io_percent = 16;
	node->bw_step = 190;
	node->sample_ms = 50;
	node->up_scale = 0;
	node->up_thres = 10;
	node->down_thres = 0;
	node->down_count = 3;
	node->hist_memory = 0;
	node->hyst_trigger_count = 3;
	node->hyst_length = 0;
	node->idle_mbps = 400;
	node->mbps_zones[0] = 0;
	node->hw = hwmon;

	mutex_init(&node->mon_lock);

	mutex_lock(&list_lock);
	list_add_tail(&node->list, &hwmon_list);
	mutex_unlock(&list_lock);

	if (hwmon->gov) {
		ret = devfreq_add_governor(hwmon->gov);
	} else {
		/* The shared governor is added once, refcounted. */
		mutex_lock(&state_lock);
		if (!use_cnt)
			ret = devfreq_add_governor(&devfreq_gov_bw_hwmon);
		if (!ret)
			use_cnt++;
		mutex_unlock(&state_lock);
	}

	if (!ret)
		dev_info(dev, "BW HWmon governor registered.\n");
	else
		dev_err(dev, "BW HWmon governor registration failed!\n");

	return ret;
}

MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
MODULE_LICENSE("GPL v2");