blob: a1d9b50974981d94266286899791448ba533be01 [file] [log] [blame]
Saravana Kannanedad3012013-09-23 19:27:57 -07001/*
Rohit Guptafe241a02017-04-26 18:46:50 -07002 * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
Saravana Kannanedad3012013-09-23 19:27:57 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#define pr_fmt(fmt) "bw-hwmon: " fmt
15
16#include <linux/kernel.h>
17#include <linux/sizes.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/delay.h>
22#include <linux/ktime.h>
23#include <linux/time.h>
24#include <linux/err.h>
25#include <linux/errno.h>
26#include <linux/mutex.h>
27#include <linux/interrupt.h>
Saravana Kannanb93a2752015-06-11 16:04:23 -070028#include <linux/spinlock.h>
Saravana Kannanedad3012013-09-23 19:27:57 -070029#include <linux/platform_device.h>
30#include <linux/of.h>
31#include <linux/devfreq.h>
32#include <trace/events/power.h>
33#include "governor.h"
34#include "governor_bw_hwmon.h"
35
#define NUM_MBPS_ZONES		10
/*
 * Per-device governor state. One node is created per registered BW HW
 * monitor (see register_bw_hwmon()) and is attached to the devfreq
 * instance via df->data while the governor is running.
 */
struct hwmon_node {
	/* Tunables exposed through the sysfs attribute group below. */
	unsigned int guard_band_mbps;	/* Added on top of the measured vote */
	unsigned int decay_rate;	/* % weight of new vote vs prev_ab */
	unsigned int io_percent;	/* Scales the AB vote into a freq */
	unsigned int bw_step;		/* Rounding step for the AB vote */
	unsigned int sample_ms;		/* Short sample window length (ms) */
	unsigned int up_scale;		/* Extra % over-vote on UP_WAKE */
	unsigned int up_thres;		/* % above vote that arms UP_WAKE */
	unsigned int down_thres;	/* % of measurement that arms DOWN_WAKE */
	unsigned int down_count;	/* Low SW samples needed for DOWN_WAKE */
	unsigned int hist_memory;	/* Decision windows to remember peak */
	unsigned int hyst_trigger_count;
	unsigned int hyst_length;
	unsigned int idle_mbps;
	unsigned int low_power_ceil_mbps;
	unsigned int low_power_io_percent;
	unsigned int low_power_delay;
	unsigned int mbps_zones[NUM_MBPS_ZONES];	/* 0-terminated list */

	/* Internal state; sampling fields are updated under irq_lock. */
	unsigned long prev_ab;		/* Last AB value voted */
	unsigned long *dev_ab;		/* Device AB vote; NULL if unsupported */
	unsigned long resume_freq;	/* Saved freq across suspend/resume */
	unsigned long resume_ab;	/* Saved AB across suspend/resume */
	unsigned long bytes;		/* Bytes carried across SW samples */
	unsigned long max_mbps;		/* Peak mbps in the current window */
	unsigned long hist_max_mbps;	/* Peak over last hist_mem windows */
	unsigned long hist_mem;		/* Windows left before peak expires */
	unsigned long hyst_peak;
	unsigned long hyst_mbps;
	unsigned long hyst_trig_win;
	unsigned long hyst_en;
	unsigned long above_low_power;
	unsigned long prev_req;		/* Last requested mbps */
	unsigned int wake;		/* 0, UP_WAKE or DOWN_WAKE */
	unsigned int down_cnt;		/* Countdown of low SW samples */
	ktime_t prev_ts;		/* Start of the current SW sample */
	ktime_t hist_max_ts;
	bool sampled;			/* Window already sampled this period */
	bool mon_started;
	struct list_head list;		/* Entry in hwmon_list */
	void *orig_data;		/* df->data saved across gov_start() */
	struct bw_hwmon *hw;
	struct devfreq_governor *gov;	/* Optional hwmon-private governor */
	struct attribute_group *attr_grp;
};
82
/* Reasons a sample window ended: bandwidth crossed the up/down threshold. */
#define UP_WAKE 1
#define DOWN_WAKE 2
/* Serializes sampling state between the governor and the monitor's callers. */
static DEFINE_SPINLOCK(irq_lock);

/* All registered hwmon_node instances; protected by list_lock. */
static LIST_HEAD(hwmon_list);
static DEFINE_MUTEX(list_lock);

/* Users of the shared "bw_hwmon" governor; protected by state_lock. */
static int use_cnt;
static DEFINE_MUTEX(state_lock);
92
/*
 * Generate a sysfs show function that prints the named hwmon_node
 * field as an unsigned integer.
 */
#define show_attr(name) \
static ssize_t show_##name(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name); \
}
101
/*
 * Generate a sysfs store function that parses an unsigned integer,
 * clamps it to [_min, _max] and stores it in the named hwmon_node field.
 *
 * Fix: the parse must use kstrtouint() — val is an unsigned int, so the
 * previous kstrtoint() call passed an unsigned int * where an int * is
 * expected, and would also have accepted negative input before clamping.
 * This matches the kstrtouint() usage in store_list_attr().
 */
#define store_attr(name, _min, _max) \
static ssize_t store_##name(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	int ret; \
	unsigned int val; \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) \
		return ret; \
	val = max(val, _min); \
	val = min(val, _max); \
	hw->name = val; \
	return count; \
}
119
/* Declare a read/write (0644) governor tunable with clamped store range. */
#define gov_attr(__attr, min, max) \
show_attr(__attr) \
store_attr(__attr, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
124
/*
 * Generate a sysfs show function that prints the named 0-terminated list
 * field (up to n entries) as space-separated unsigned integers.
 *
 * Fix: bound each print by the space remaining (PAGE_SIZE - cnt) instead
 * of passing PAGE_SIZE for every chunk, which could run the output past
 * the sysfs page buffer. scnprintf() returns the bytes actually written,
 * so cnt can never exceed PAGE_SIZE.
 */
#define show_list_attr(name, n) \
static ssize_t show_list_##name(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	unsigned int i, cnt = 0; \
	\
	for (i = 0; i < n && hw->name[i]; i++) \
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u ", \
				hw->name[i]); \
	cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "\n"); \
	return cnt; \
}
138
/*
 * Generate a sysfs store function that parses a whitespace-separated list
 * of unsigned integers, clamps each to [_min, _max] and stores them in the
 * named array field. At most n - 1 values are accepted; the entry after
 * the last stored value is set to 0 to terminate the list — note this
 * happens even on a parse error, truncating the list at the failed entry.
 */
#define store_list_attr(name, n, _min, _max) \
static ssize_t store_list_##name(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	int ret, numvals; \
	unsigned int i = 0, val; \
	char **strlist; \
	\
	strlist = argv_split(GFP_KERNEL, buf, &numvals); \
	if (!strlist) \
		return -ENOMEM; \
	numvals = min(numvals, n - 1); \
	for (i = 0; i < numvals; i++) { \
		ret = kstrtouint(strlist[i], 10, &val); \
		if (ret) \
			goto out; \
		val = max(val, _min); \
		val = min(val, _max); \
		hw->name[i] = val; \
	} \
	ret = count; \
out: \
	argv_free(strlist); \
	hw->name[i] = 0; \
	return ret; \
}
168
/* Declare a read/write (0644) list tunable of up to n clamped values. */
#define gov_list_attr(__attr, n, min, max) \
show_list_attr(__attr, n) \
store_list_attr(__attr, n, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_list_##__attr, store_list_##__attr)
173
/* Bounds for the devfreq polling interval, in milliseconds. */
#define MIN_MS	10U
#define MAX_MS	500U
176
/* Returns MBps of read/writes for the sampling window. */
static unsigned int bytes_to_mbps(long long bytes, unsigned int us)
{
	/* bytes/us == bytes_per_sec after this scale; do_div divides in place */
	bytes *= USEC_PER_SEC;
	do_div(bytes, us);
	/* Round up so any non-zero traffic yields a non-zero MBps value. */
	bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
	return bytes;
}
Saravana Kannanedad3012013-09-23 19:27:57 -0700185
Saravana Kannanb93a2752015-06-11 16:04:23 -0700186static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms)
187{
188 mbps *= ms;
189 mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
190 mbps *= SZ_1M;
Saravana Kannanedad3012013-09-23 19:27:57 -0700191 return mbps;
192}
193
/*
 * End a software-timed micro sample: convert the byte count accumulated
 * since prev_ts into MBps, fold it into the window peak (max_mbps) and
 * decide whether the governor should be woken up (UP_WAKE/DOWN_WAKE/0).
 * Caller must hold irq_lock.
 */
static int __bw_hwmon_sw_sample_end(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	ktime_t ts;
	unsigned long bytes, mbps;
	unsigned int us;
	int wake = 0;

	df = hwmon->df;
	node = df->data;

	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, node->prev_ts));

	/* Include bytes left over from the previous threshold setup. */
	bytes = hwmon->get_bytes_and_clear(hwmon);
	bytes += node->bytes;
	node->bytes = 0;

	mbps = bytes_to_mbps(bytes, us);
	node->max_mbps = max(node->max_mbps, mbps);

	/*
	 * If the measured bandwidth in a micro sample is greater than the
	 * wake up threshold, it indicates an increase in load that's non
	 * trivial. So, have the governor ignore historical idle time or low
	 * bandwidth usage and do the bandwidth calculation based on just
	 * this micro sample.
	 */
	if (mbps > node->hw->up_wake_mbps) {
		wake = UP_WAKE;
	} else if (mbps < node->hw->down_wake_mbps) {
		/* Require down_cnt consecutive low samples before DOWN_WAKE */
		if (node->down_cnt)
			node->down_cnt--;
		if (node->down_cnt <= 0)
			wake = DOWN_WAKE;
	}

	node->prev_ts = ts;
	node->wake = wake;
	node->sampled = true;

	trace_bw_hwmon_meas(dev_name(df->dev.parent),
				mbps,
				us,
				wake);

	return wake;
}
243
/*
 * End a sample window for monitors with HW-programmed events: read the
 * measurement the HW reports for its sample window and classify it as
 * UP_WAKE/DOWN_WAKE/neither. Always returns 1 (the window is complete by
 * definition when the HW raises the event). Caller must hold irq_lock.
 */
static int __bw_hwmon_hw_sample_end(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	unsigned long bytes, mbps;
	int wake = 0;

	df = hwmon->df;
	node = df->data;

	/*
	 * If this read is in response to an IRQ, the HW monitor should
	 * return the measurement in the micro sample that triggered the IRQ.
	 * Otherwise, it should return the maximum measured value in any
	 * micro sample since the last time we called get_bytes_and_clear()
	 */
	bytes = hwmon->get_bytes_and_clear(hwmon);
	mbps = bytes_to_mbps(bytes, node->sample_ms * USEC_PER_MSEC);
	node->max_mbps = mbps;

	if (mbps > node->hw->up_wake_mbps)
		wake = UP_WAKE;
	else if (mbps < node->hw->down_wake_mbps)
		wake = DOWN_WAKE;

	node->wake = wake;
	node->sampled = true;

	trace_bw_hwmon_meas(dev_name(df->dev.parent),
				mbps,
				node->sample_ms * USEC_PER_MSEC,
				wake);

	return 1;
}
279
280static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon)
281{
282 if (hwmon->set_hw_events)
283 return __bw_hwmon_hw_sample_end(hwmon);
284 else
285 return __bw_hwmon_sw_sample_end(hwmon);
286}
287
/*
 * Public entry point for the HW monitor driver to report the end of a
 * sample window. Serializes against the governor's own sampling with
 * irq_lock and returns 0, UP_WAKE or DOWN_WAKE so the caller knows
 * whether a frequency re-evaluation is needed.
 */
int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
{
	unsigned long flags;
	int wake;

	spin_lock_irqsave(&irq_lock, flags);
	wake = __bw_hwmon_sample_end(hwmon);
	spin_unlock_irqrestore(&irq_lock, flags);

	return wake;
}
299
300unsigned long to_mbps_zone(struct hwmon_node *node, unsigned long mbps)
301{
302 int i;
303
304 for (i = 0; i < NUM_MBPS_ZONES && node->mbps_zones[i]; i++)
305 if (node->mbps_zones[i] >= mbps)
306 return node->mbps_zones[i];
307
308 return node->hw->df->max_freq;
309}
310
#define MIN_MBPS	500UL
#define HIST_PEAK_TOL	60
/*
 * Core decision function: turn the bandwidth measured over the last
 * window into a new AB vote (*ab, if the device takes AB votes) and a
 * frequency request (*freq), and re-arm the monitor's up/down wake
 * thresholds for the next window. Returns the requested mbps.
 *
 * The sampling/threshold portion runs under irq_lock so it can't race
 * with bw_hwmon_sample_end(); the vote computation at the end runs
 * unlocked on local values.
 */
static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
					unsigned long *freq, unsigned long *ab)
{
	unsigned long meas_mbps, thres, flags, req_mbps, adj_mbps;
	unsigned long meas_mbps_zone;
	unsigned long hist_lo_tol, hyst_lo_tol;
	struct bw_hwmon *hw = node->hw;
	unsigned int new_bw, io_percent;
	ktime_t ts;
	unsigned int ms = 0;

	spin_lock_irqsave(&irq_lock, flags);

	/* SW-timed monitors need the elapsed time to decide on a re-sample. */
	if (!hw->set_hw_events) {
		ts = ktime_get();
		ms = ktime_to_ms(ktime_sub(ts, node->prev_ts));
	}
	/* Close the window now if the IRQ path hasn't already done so. */
	if (!node->sampled || ms >= node->sample_ms)
		__bw_hwmon_sample_end(node->hw);
	node->sampled = false;

	req_mbps = meas_mbps = node->max_mbps;
	node->max_mbps = 0;

	hist_lo_tol = (node->hist_max_mbps * HIST_PEAK_TOL) / 100;
	/* Remember historic peak in the past hist_mem decision windows. */
	if (meas_mbps > node->hist_max_mbps || !node->hist_mem) {
		/* If new max or no history */
		node->hist_max_mbps = meas_mbps;
		node->hist_mem = node->hist_memory;
	} else if (meas_mbps >= hist_lo_tol) {
		/*
		 * If subsequent peaks come close (within tolerance) to but
		 * less than the historic peak, then reset the history start,
		 * but not the peak value.
		 */
		node->hist_mem = node->hist_memory;
	} else {
		/* Count down history expiration. */
		if (node->hist_mem)
			node->hist_mem--;
	}

	/* Keep track of whether we are in low power mode consistently. */
	if (meas_mbps > node->low_power_ceil_mbps)
		node->above_low_power = node->low_power_delay;
	if (node->above_low_power)
		node->above_low_power--;

	if (node->above_low_power)
		io_percent = node->io_percent;
	else
		io_percent = node->low_power_io_percent;

	/*
	 * The AB value that corresponds to the lowest mbps zone greater than
	 * or equal to the "frequency" the current measurement will pick.
	 * This upper limit is useful for balancing out any prediction
	 * mechanisms to be power friendly.
	 */
	meas_mbps_zone = (meas_mbps * 100) / io_percent;
	meas_mbps_zone = to_mbps_zone(node, meas_mbps_zone);
	meas_mbps_zone = (meas_mbps_zone * io_percent) / 100;
	meas_mbps_zone = max(meas_mbps, meas_mbps_zone);

	/*
	 * If this is a wake up due to BW increase, vote much higher BW than
	 * what we measure to stay ahead of increasing traffic and then set
	 * it up to vote for measured BW if we see down_count short sample
	 * windows of low traffic.
	 */
	if (node->wake == UP_WAKE) {
		req_mbps += ((meas_mbps - node->prev_req)
				* node->up_scale) / 100;
		/*
		 * However if the measured load is less than the historic
		 * peak, but the over request is higher than the historic
		 * peak, then we could limit the over requesting to the
		 * historic peak.
		 */
		if (req_mbps > node->hist_max_mbps
		    && meas_mbps < node->hist_max_mbps)
			req_mbps = node->hist_max_mbps;

		req_mbps = min(req_mbps, meas_mbps_zone);
	}

	/* Hysteresis: track repeated peaks and hold the vote at their level. */
	hyst_lo_tol = (node->hyst_mbps * HIST_PEAK_TOL) / 100;
	if (meas_mbps > node->hyst_mbps && meas_mbps > MIN_MBPS) {
		hyst_lo_tol = (meas_mbps * HIST_PEAK_TOL) / 100;
		node->hyst_peak = 0;
		node->hyst_trig_win = node->hyst_length;
		node->hyst_mbps = meas_mbps;
	}

	/*
	 * Check node->max_mbps to avoid double counting peaks that cause
	 * early termination of a window.
	 */
	if (meas_mbps >= hyst_lo_tol && meas_mbps > MIN_MBPS
	    && !node->max_mbps) {
		node->hyst_peak++;
		if (node->hyst_peak >= node->hyst_trigger_count
		    || node->hyst_en)
			node->hyst_en = node->hyst_length;
	}

	if (node->hyst_trig_win)
		node->hyst_trig_win--;
	if (node->hyst_en)
		node->hyst_en--;

	if (!node->hyst_trig_win && !node->hyst_en) {
		node->hyst_peak = 0;
		node->hyst_mbps = 0;
	}

	if (node->hyst_en) {
		if (meas_mbps > node->idle_mbps)
			req_mbps = max(req_mbps, node->hyst_mbps);
	}

	/* Stretch the short sample window size, if the traffic is too low */
	if (meas_mbps < MIN_MBPS) {
		hw->up_wake_mbps = (max(MIN_MBPS, req_mbps)
					* (100 + node->up_thres)) / 100;
		hw->down_wake_mbps = 0;
		hw->undo_over_req_mbps = 0;
		thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2),
					node->sample_ms);
	} else {
		/*
		 * Up wake vs down wake are intentionally a percentage of
		 * req_mbps vs meas_mbps to make sure the over requesting
		 * phase is handled properly. We only want to wake up and
		 * reduce the vote based on the measured mbps being less than
		 * the previous measurement that caused the "over request".
		 */
		hw->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100;
		hw->down_wake_mbps = (meas_mbps * node->down_thres) / 100;
		if (node->wake == UP_WAKE)
			hw->undo_over_req_mbps = min(req_mbps, meas_mbps_zone);
		else
			hw->undo_over_req_mbps = 0;
		thres = mbps_to_bytes(meas_mbps, node->sample_ms);
	}

	/* Re-arm the monitor: HW events when supported, byte threshold else. */
	if (hw->set_hw_events) {
		hw->down_cnt = node->down_count;
		hw->set_hw_events(hw, node->sample_ms);
	} else {
		node->down_cnt = node->down_count;
		node->bytes = hw->set_thres(hw, thres);
	}

	node->wake = 0;
	node->prev_req = req_mbps;

	spin_unlock_irqrestore(&irq_lock, flags);

	adj_mbps = req_mbps + node->guard_band_mbps;

	/* Decay the vote downwards; upward moves take effect immediately. */
	if (adj_mbps > node->prev_ab) {
		new_bw = adj_mbps;
	} else {
		new_bw = adj_mbps * node->decay_rate
			+ node->prev_ab * (100 - node->decay_rate);
		new_bw /= 100;
	}

	node->prev_ab = new_bw;
	if (ab)
		*ab = roundup(new_bw, node->bw_step);

	*freq = (new_bw * 100) / io_percent;
	trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
				new_bw,
				*freq,
				hw->up_wake_mbps,
				hw->down_wake_mbps);
	return req_mbps;
}
495
496static struct hwmon_node *find_hwmon_node(struct devfreq *df)
497{
498 struct hwmon_node *node, *found = NULL;
499
500 mutex_lock(&list_lock);
501 list_for_each_entry(node, &hwmon_list, list)
502 if (node->hw->dev == df->dev.parent ||
503 node->hw->of_node == df->dev.parent->of_node ||
504 (!node->hw->dev && !node->hw->of_node &&
505 node->gov == df->governor)) {
506 found = node;
507 break;
508 }
509 mutex_unlock(&list_lock);
510
511 return found;
512}
513
/*
 * Called by the HW monitor driver to force an immediate frequency
 * re-evaluation (e.g. after a threshold interrupt). The devfreq monitor
 * is stopped around the update so its delayed work can't race with it.
 * Returns 0 on success or a negative errno.
 */
int update_bw_hwmon(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	int ret;

	if (!hwmon)
		return -EINVAL;
	df = hwmon->df;
	if (!df)
		return -ENODEV;
	node = df->data;
	if (!node)
		return -ENODEV;

	/* Ignore requests while the governor is stopped or suspended. */
	if (!node->mon_started)
		return -EBUSY;

	dev_dbg(df->dev.parent, "Got update request\n");
	devfreq_monitor_stop(df);

	mutex_lock(&df->lock);
	ret = update_devfreq(df);
	if (ret)
		dev_err(df->dev.parent,
			"Unable to update freq on request!\n");
	mutex_unlock(&df->lock);

	devfreq_monitor_start(df);

	return 0;
}
546
/*
 * Start (init == true) or resume (init == false) the HW monitor and the
 * devfreq polling. On initial start the wake thresholds are seeded from
 * the current frequency so the first window has sane trigger points.
 * Returns 0 on success or the error from the hwmon callback.
 */
static int start_monitor(struct devfreq *df, bool init)
{
	struct hwmon_node *node = df->data;
	struct bw_hwmon *hw = node->hw;
	struct device *dev = df->dev.parent;
	unsigned long mbps;
	int ret;

	node->prev_ts = ktime_get();

	if (init) {
		node->prev_ab = 0;
		node->resume_freq = 0;
		node->resume_ab = 0;
		/* Seed thresholds from the bandwidth the current freq implies */
		mbps = (df->previous_freq * node->io_percent) / 100;
		hw->up_wake_mbps = mbps;
		hw->down_wake_mbps = MIN_MBPS;
		hw->undo_over_req_mbps = 0;
		ret = hw->start_hwmon(hw, mbps);
	} else {
		ret = hw->resume_hwmon(hw);
	}

	if (ret) {
		dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
		return ret;
	}

	if (init)
		devfreq_monitor_start(df);
	else
		devfreq_monitor_resume(df);

	node->mon_started = true;

	return 0;
}
584
585static void stop_monitor(struct devfreq *df, bool init)
586{
587 struct hwmon_node *node = df->data;
588 struct bw_hwmon *hw = node->hw;
589
590 node->mon_started = false;
591
592 if (init) {
593 devfreq_monitor_stop(df);
594 hw->stop_hwmon(hw);
595 } else {
596 devfreq_monitor_suspend(df);
597 hw->suspend_hwmon(hw);
598 }
599
600}
601
602static int gov_start(struct devfreq *df)
Saravana Kannanedad3012013-09-23 19:27:57 -0700603{
604 int ret = 0;
Saravana Kannanedad3012013-09-23 19:27:57 -0700605 struct device *dev = df->dev.parent;
606 struct hwmon_node *node;
607 struct bw_hwmon *hw;
608 struct devfreq_dev_status stat;
609
610 node = find_hwmon_node(df);
611 if (!node) {
612 dev_err(dev, "Unable to find HW monitor!\n");
613 return -ENODEV;
614 }
615 hw = node->hw;
616
617 stat.private_data = NULL;
618 if (df->profile->get_dev_status)
619 ret = df->profile->get_dev_status(df->dev.parent, &stat);
620 if (ret || !stat.private_data)
621 dev_warn(dev, "Device doesn't take AB votes!\n");
622 else
623 node->dev_ab = stat.private_data;
624
625 hw->df = df;
626 node->orig_data = df->data;
627 df->data = node;
628
Saravana Kannancddae1b2014-08-07 19:38:02 -0700629 if (start_monitor(df, true))
Saravana Kannanedad3012013-09-23 19:27:57 -0700630 goto err_start;
Saravana Kannanedad3012013-09-23 19:27:57 -0700631
632 ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
633 if (ret)
634 goto err_sysfs;
635
636 return 0;
637
638err_sysfs:
Saravana Kannancddae1b2014-08-07 19:38:02 -0700639 stop_monitor(df, true);
Saravana Kannanedad3012013-09-23 19:27:57 -0700640err_start:
641 df->data = node->orig_data;
642 node->orig_data = NULL;
643 hw->df = NULL;
644 node->dev_ab = NULL;
645 return ret;
646}
647
/* Unbind the governor from a devfreq device, undoing gov_start(). */
static void gov_stop(struct devfreq *df)
{
	struct hwmon_node *node = df->data;
	struct bw_hwmon *hw = node->hw;

	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
	stop_monitor(df, true);
	df->data = node->orig_data;
	node->orig_data = NULL;
	hw->df = NULL;
	/*
	 * Not all governors know about this additional extended device
	 * configuration. To avoid leaving the extended configuration at a
	 * stale state, set it to 0 and let the next governor take it from
	 * there.
	 */
	if (node->dev_ab)
		*node->dev_ab = 0;
	node->dev_ab = NULL;
}
668
Saravana Kannancddae1b2014-08-07 19:38:02 -0700669static int gov_suspend(struct devfreq *df)
670{
671 struct hwmon_node *node = df->data;
672 unsigned long resume_freq = df->previous_freq;
673 unsigned long resume_ab = *node->dev_ab;
674
675 if (!node->hw->suspend_hwmon)
676 return -ENOSYS;
677
678 if (node->resume_freq) {
679 dev_warn(df->dev.parent, "Governor already suspended!\n");
680 return -EBUSY;
681 }
682
683 stop_monitor(df, false);
684
685 mutex_lock(&df->lock);
686 update_devfreq(df);
687 mutex_unlock(&df->lock);
688
689 node->resume_freq = resume_freq;
690 node->resume_ab = resume_ab;
691
692 return 0;
693}
694
/*
 * Resume the governor: restore the saved votes via update_devfreq()
 * (the suspend path in devfreq_bw_hwmon_get_freq() reports resume_freq
 * while mon_started is false), clear the saved state, then restart the
 * monitor. Returns 0 or a negative errno.
 */
static int gov_resume(struct devfreq *df)
{
	struct hwmon_node *node = df->data;

	if (!node->hw->resume_hwmon)
		return -ENOSYS;

	/* resume_freq != 0 is the "currently suspended" marker. */
	if (!node->resume_freq) {
		dev_warn(df->dev.parent, "Governor already resumed!\n");
		return -EBUSY;
	}

	mutex_lock(&df->lock);
	update_devfreq(df);
	mutex_unlock(&df->lock);

	node->resume_freq = 0;
	node->resume_ab = 0;

	return start_monitor(df, false);
}
716
Saravana Kannanedad3012013-09-23 19:27:57 -0700717static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
718 unsigned long *freq)
719{
Saravana Kannanedad3012013-09-23 19:27:57 -0700720 struct hwmon_node *node = df->data;
721
Saravana Kannancddae1b2014-08-07 19:38:02 -0700722 /* Suspend/resume sequence */
723 if (!node->mon_started) {
724 *freq = node->resume_freq;
725 *node->dev_ab = node->resume_ab;
726 return 0;
727 }
728
Saravana Kannanb93a2752015-06-11 16:04:23 -0700729 get_bw_and_set_irq(node, freq, node->dev_ab);
Saravana Kannanedad3012013-09-23 19:27:57 -0700730
731 return 0;
732}
733
Rohit Gupta4d1f4f42015-05-08 12:04:56 -0700734static ssize_t store_throttle_adj(struct device *dev,
735 struct device_attribute *attr, const char *buf, size_t count)
736{
737 struct devfreq *df = to_devfreq(dev);
738 struct hwmon_node *node = df->data;
739 int ret;
740 unsigned int val;
741
742 if (!node->hw->set_throttle_adj)
743 return -ENOSYS;
744
745 ret = kstrtouint(buf, 10, &val);
746 if (ret)
747 return ret;
748
749 ret = node->hw->set_throttle_adj(node->hw, val);
750
751 if (!ret)
752 return count;
753 else
754 return ret;
755}
756
757static ssize_t show_throttle_adj(struct device *dev,
758 struct device_attribute *attr, char *buf)
759{
760 struct devfreq *df = to_devfreq(dev);
761 struct hwmon_node *node = df->data;
762 unsigned int val;
763
764 if (!node->hw->get_throttle_adj)
765 val = 0;
766 else
767 val = node->hw->get_throttle_adj(node->hw);
768
769 return snprintf(buf, PAGE_SIZE, "%u\n", val);
770}
771
static DEVICE_ATTR(throttle_adj, 0644, show_throttle_adj,
		store_throttle_adj);

/* Governor tunables with their allowed [min, max] clamping ranges. */
gov_attr(guard_band_mbps, 0U, 2000U);
gov_attr(decay_rate, 0U, 100U);
gov_attr(io_percent, 1U, 100U);
gov_attr(bw_step, 50U, 1000U);
gov_attr(sample_ms, 1U, 50U);
gov_attr(up_scale, 0U, 500U);
gov_attr(up_thres, 1U, 100U);
gov_attr(down_thres, 0U, 90U);
gov_attr(down_count, 0U, 90U);
gov_attr(hist_memory, 0U, 90U);
gov_attr(hyst_trigger_count, 0U, 90U);
gov_attr(hyst_length, 0U, 90U);
gov_attr(idle_mbps, 0U, 2000U);
gov_attr(low_power_ceil_mbps, 0U, 2500U);
gov_attr(low_power_io_percent, 1U, 100U);
gov_attr(low_power_delay, 1U, 60U);
gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);

/* Attributes exposed on each devfreq device while this governor runs. */
static struct attribute *dev_attr[] = {
	&dev_attr_guard_band_mbps.attr,
	&dev_attr_decay_rate.attr,
	&dev_attr_io_percent.attr,
	&dev_attr_bw_step.attr,
	&dev_attr_sample_ms.attr,
	&dev_attr_up_scale.attr,
	&dev_attr_up_thres.attr,
	&dev_attr_down_thres.attr,
	&dev_attr_down_count.attr,
	&dev_attr_hist_memory.attr,
	&dev_attr_hyst_trigger_count.attr,
	&dev_attr_hyst_length.attr,
	&dev_attr_idle_mbps.attr,
	&dev_attr_low_power_ceil_mbps.attr,
	&dev_attr_low_power_io_percent.attr,
	&dev_attr_low_power_delay.attr,
	&dev_attr_mbps_zones.attr,
	&dev_attr_throttle_adj.attr,
	NULL,
};

/* Default group, used when the hwmon doesn't supply its own governor. */
static struct attribute_group dev_attr_group = {
	.name = "bw_hwmon",
	.attrs = dev_attr,
};
819
/*
 * Governor event handler: dispatches devfreq lifecycle events
 * (start/stop/interval-change/suspend/resume) to the corresponding
 * governor operations. The whole handler runs under state_lock so the
 * events are serialized. Returns 0 or a negative errno.
 */
static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
					unsigned int event, void *data)
{
	int ret = 0;
	unsigned int sample_ms;
	struct hwmon_node *node;
	struct bw_hwmon *hw;

	mutex_lock(&state_lock);

	switch (event) {
	case DEVFREQ_GOV_START:
		/* Clamp the polling interval to [MIN_MS, MAX_MS]. */
		sample_ms = df->profile->polling_ms;
		sample_ms = max(MIN_MS, sample_ms);
		sample_ms = min(MAX_MS, sample_ms);
		df->profile->polling_ms = sample_ms;

		ret = gov_start(df);
		if (ret)
			goto out;

		dev_dbg(df->dev.parent,
			"Enabled dev BW HW monitor governor\n");
		break;

	case DEVFREQ_GOV_STOP:
		gov_stop(df);
		dev_dbg(df->dev.parent,
			"Disabled dev BW HW monitor governor\n");
		break;

	case DEVFREQ_GOV_INTERVAL:
		sample_ms = *(unsigned int *)data;
		sample_ms = max(MIN_MS, sample_ms);
		sample_ms = min(MAX_MS, sample_ms);
		/*
		 * Suspend/resume the HW monitor around the interval update
		 * to prevent the HW monitor IRQ from trying to change
		 * stop/start the delayed workqueue while the interval update
		 * is happening.
		 */
		node = df->data;
		hw = node->hw;
		hw->suspend_hwmon(hw);
		devfreq_interval_update(df, &sample_ms);
		ret = hw->resume_hwmon(hw);
		if (ret) {
			dev_err(df->dev.parent,
				"Unable to resume HW monitor (%d)\n", ret);
			goto out;
		}
		break;

	case DEVFREQ_GOV_SUSPEND:
		ret = gov_suspend(df);
		if (ret) {
			dev_err(df->dev.parent,
				"Unable to suspend BW HW mon governor (%d)\n",
				ret);
			goto out;
		}

		dev_dbg(df->dev.parent, "Suspended BW HW mon governor\n");
		break;

	case DEVFREQ_GOV_RESUME:
		ret = gov_resume(df);
		if (ret) {
			dev_err(df->dev.parent,
				"Unable to resume BW HW mon governor (%d)\n",
				ret);
			goto out;
		}

		dev_dbg(df->dev.parent, "Resumed BW HW mon governor\n");
		break;
	}

out:
	mutex_unlock(&state_lock);

	return ret;
}
903
/* The shared "bw_hwmon" governor, registered once (see use_cnt). */
static struct devfreq_governor devfreq_gov_bw_hwmon = {
	.name = "bw_hwmon",
	.get_target_freq = devfreq_bw_hwmon_get_freq,
	.event_handler = devfreq_bw_hwmon_ev_handler,
};
909
/*
 * register_bw_hwmon - register a BW HW monitor with this governor
 * @dev: device owning the hwmon (used for devm allocations and logging)
 * @hwmon: monitor ops/state; at least one of gov, dev or of_node must be
 *	   set so gov_start() can later match it to a devfreq device
 *
 * Applies default tunables, adds the node to hwmon_list, and registers
 * either the hwmon-private governor or (once, tracked by use_cnt) the
 * shared "bw_hwmon" governor. Returns 0 on success or a negative errno.
 */
int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
{
	int ret = 0;
	struct hwmon_node *node;
	struct attribute_group *attr_grp;

	if (!hwmon->gov && !hwmon->dev && !hwmon->of_node)
		return -EINVAL;

	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	if (hwmon->gov) {
		/* Private governor: reuse this driver's callbacks and attrs. */
		attr_grp = devm_kzalloc(dev, sizeof(*attr_grp), GFP_KERNEL);
		if (!attr_grp)
			return -ENOMEM;

		hwmon->gov->get_target_freq = devfreq_bw_hwmon_get_freq;
		hwmon->gov->event_handler = devfreq_bw_hwmon_ev_handler;
		attr_grp->name = hwmon->gov->name;
		attr_grp->attrs = dev_attr;

		node->gov = hwmon->gov;
		node->attr_grp = attr_grp;
	} else {
		node->gov = &devfreq_gov_bw_hwmon;
		node->attr_grp = &dev_attr_group;
	}

	/* Default tunables; all adjustable later through sysfs. */
	node->guard_band_mbps = 100;
	node->decay_rate = 90;
	node->io_percent = 16;
	node->low_power_ceil_mbps = 0;
	node->low_power_io_percent = 16;
	node->low_power_delay = 60;
	node->bw_step = 190;
	node->sample_ms = 50;
	node->up_scale = 0;
	node->up_thres = 10;
	node->down_thres = 0;
	node->down_count = 3;
	node->hist_memory = 0;
	node->hyst_trigger_count = 3;
	node->hyst_length = 0;
	node->idle_mbps = 400;
	node->mbps_zones[0] = 0;
	node->hw = hwmon;

	mutex_lock(&list_lock);
	list_add_tail(&node->list, &hwmon_list);
	mutex_unlock(&list_lock);

	if (hwmon->gov) {
		ret = devfreq_add_governor(hwmon->gov);
	} else {
		mutex_lock(&state_lock);
		if (!use_cnt)
			ret = devfreq_add_governor(&devfreq_gov_bw_hwmon);
		if (!ret)
			use_cnt++;
		mutex_unlock(&state_lock);
	}

	if (!ret)
		dev_info(dev, "BW HWmon governor registered.\n");
	else
		dev_err(dev, "BW HWmon governor registration failed!\n");

	return ret;
}
981
982MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
983MODULE_LICENSE("GPL v2");