blob: d7cc425c928acdc576c185a8b666de3059129629 [file] [log] [blame]
Saravana Kannanedad3012013-09-23 19:27:57 -07001/*
Saravana Kannan3536bd32016-02-18 18:28:29 -08002 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
Saravana Kannanedad3012013-09-23 19:27:57 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#define pr_fmt(fmt) "bw-hwmon: " fmt
15
16#include <linux/kernel.h>
17#include <linux/sizes.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/delay.h>
22#include <linux/ktime.h>
23#include <linux/time.h>
24#include <linux/err.h>
25#include <linux/errno.h>
26#include <linux/mutex.h>
27#include <linux/interrupt.h>
Saravana Kannanb93a2752015-06-11 16:04:23 -070028#include <linux/spinlock.h>
Saravana Kannanedad3012013-09-23 19:27:57 -070029#include <linux/platform_device.h>
30#include <linux/of.h>
31#include <linux/devfreq.h>
32#include <trace/events/power.h>
33#include "governor.h"
34#include "governor_bw_hwmon.h"
35
/* Maximum number of configurable BW "zones" used to quantize votes. */
#define NUM_MBPS_ZONES		10

/*
 * Per-device governor instance state. One node exists per registered
 * bw_hwmon and is installed as df->data while the governor is active.
 */
struct hwmon_node {
	/* Tunables exposed via sysfs (see gov_attr list at bottom of file). */
	unsigned int guard_band_mbps;
	unsigned int decay_rate;
	unsigned int io_percent;
	unsigned int bw_step;
	unsigned int sample_ms;
	unsigned int up_scale;
	unsigned int up_thres;
	unsigned int down_thres;
	unsigned int down_count;
	unsigned int hist_memory;
	unsigned int hyst_trigger_count;
	unsigned int hyst_length;
	unsigned int idle_mbps;
	unsigned int low_power_ceil_mbps;
	unsigned int low_power_io_percent;
	unsigned int low_power_delay;
	unsigned int mbps_zones[NUM_MBPS_ZONES];

	/* Voting state. dev_ab is NULL if the device takes no AB votes. */
	unsigned long prev_ab;
	unsigned long *dev_ab;
	/* Freq/AB restored when the governor is resumed from suspend. */
	unsigned long resume_freq;
	unsigned long resume_ab;
	/* Sampling state; fields below are protected by irq_lock. */
	unsigned long bytes;
	unsigned long max_mbps;
	unsigned long hist_max_mbps;
	unsigned long hist_mem;
	unsigned long hyst_peak;
	unsigned long hyst_mbps;
	unsigned long hyst_trig_win;
	unsigned long hyst_en;
	unsigned long above_low_power;
	unsigned long prev_req;
	unsigned int wake;	/* 0, UP_WAKE or DOWN_WAKE */
	unsigned int down_cnt;
	ktime_t prev_ts;
	ktime_t hist_max_ts;
	bool sampled;
	bool mon_started;
	struct list_head list;
	/* Original df->data, restored when the governor stops. */
	void *orig_data;
	struct bw_hwmon *hw;
	struct devfreq_governor *gov;
	struct attribute_group *attr_grp;
};
82
/* Wake reasons produced by the sample-end handlers. */
#define UP_WAKE 1
#define DOWN_WAKE 2
/* Serializes sampling state against the HW monitor IRQ path. */
static DEFINE_SPINLOCK(irq_lock);

/* All registered hwmon nodes; additions/lookups hold list_lock. */
static LIST_HEAD(hwmon_list);
static DEFINE_MUTEX(list_lock);

/* Refcount for the shared governor registration; guarded by state_lock. */
static int use_cnt;
static DEFINE_MUTEX(state_lock);
92
/* Generates a sysfs "show" handler printing the named node field. */
#define show_attr(name) \
static ssize_t show_##name(struct device *dev,				\
			struct device_attribute *attr, char *buf)	\
{									\
	struct devfreq *df = to_devfreq(dev);				\
	struct hwmon_node *hw = df->data;				\
	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name);		\
}
101
/*
 * Generates a sysfs "store" handler that parses an unsigned integer,
 * clamps it to [_min, _max] and writes it to the named node field.
 * Use kstrtouint(): the destination is unsigned, and kstrtoint() would
 * take an int * (type mismatch) and silently wrap negative input.
 */
#define store_attr(name, _min, _max) \
static ssize_t store_##name(struct device *dev,				\
			struct device_attribute *attr, const char *buf,	\
			size_t count)					\
{									\
	struct devfreq *df = to_devfreq(dev);				\
	struct hwmon_node *hw = df->data;				\
	int ret;							\
	unsigned int val;						\
	ret = kstrtouint(buf, 10, &val);				\
	if (ret)							\
		return ret;						\
	val = max(val, _min);						\
	val = min(val, _max);						\
	hw->name = val;							\
	return count;							\
}
119
/* Declares show/store handlers plus the DEVICE_ATTR for one tunable. */
#define gov_attr(__attr, min, max) \
show_attr(__attr) \
store_attr(__attr, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
124
/*
 * Generates a sysfs "show" handler for a zero-terminated list field.
 * Each snprintf() must be bounded by the space remaining in the page
 * (PAGE_SIZE - cnt); bounding by the full PAGE_SIZE at offset cnt
 * could write past the end of the sysfs buffer.
 */
#define show_list_attr(name, n) \
static ssize_t show_list_##name(struct device *dev,			\
			struct device_attribute *attr, char *buf)	\
{									\
	struct devfreq *df = to_devfreq(dev);				\
	struct hwmon_node *hw = df->data;				\
	unsigned int i, cnt = 0;					\
									\
	for (i = 0; i < n && hw->name[i]; i++)				\
		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u ",	\
				hw->name[i]);				\
	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");		\
	return cnt;							\
}
138
/*
 * Generates a sysfs "store" handler for a space-separated list of
 * unsigned integers; the stored list is zero-terminated. As with
 * store_attr(), use kstrtouint() so the parse matches the unsigned
 * destination and rejects negative input instead of wrapping it.
 */
#define store_list_attr(name, n, _min, _max) \
static ssize_t store_list_##name(struct device *dev,			\
			struct device_attribute *attr, const char *buf,	\
			size_t count)					\
{									\
	struct devfreq *df = to_devfreq(dev);				\
	struct hwmon_node *hw = df->data;				\
	int ret;							\
	unsigned int i = 0, val;					\
									\
	do {								\
		ret = kstrtouint(buf, 10, &val);			\
		if (ret)						\
			break;						\
		buf = strnchr(buf, PAGE_SIZE, ' ');			\
		if (buf)						\
			buf++;						\
		val = max(val, _min);					\
		val = min(val, _max);					\
		hw->name[i] = val;					\
		i++;							\
	} while (buf && i < n - 1);					\
	if (i < 1)							\
		return -EINVAL;						\
	hw->name[i] = 0;						\
	return count;							\
}
166
/* Declares show/store handlers plus the DEVICE_ATTR for a list tunable. */
#define gov_list_attr(__attr, n, min, max) \
show_list_attr(__attr, n) \
store_list_attr(__attr, n, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_list_##__attr, store_list_##__attr)
171
/* Allowed range for the devfreq polling interval, in milliseconds. */
#define MIN_MS	10U
#define MAX_MS	500U
174
/* Returns MBps of read/writes for the sampling window. */
static unsigned int bytes_to_mbps(long long bytes, unsigned int us)
{
	/* Scale to bytes/second before dividing by the window length. */
	bytes *= USEC_PER_SEC;
	/* do_div() divides in place and is needed for 64-bit math. */
	do_div(bytes, us);
	/* Round up so small non-zero traffic doesn't report as 0 MBps. */
	bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
	return bytes;
}
Saravana Kannanedad3012013-09-23 19:27:57 -0700183
Saravana Kannanb93a2752015-06-11 16:04:23 -0700184static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms)
185{
186 mbps *= ms;
187 mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
188 mbps *= SZ_1M;
Saravana Kannanedad3012013-09-23 19:27:57 -0700189 return mbps;
190}
191
/*
 * SW-sampled path: read the byte count accumulated since node->prev_ts,
 * convert it to MBps and decide whether this micro sample should wake
 * the governor early (UP_WAKE/DOWN_WAKE). Caller holds irq_lock.
 */
static int __bw_hwmon_sw_sample_end(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	ktime_t ts;
	unsigned long bytes, mbps;
	unsigned int us;
	int wake = 0;

	df = hwmon->df;
	node = df->data;

	/* Measure the elapsed micro-sample window. */
	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, node->prev_ts));

	/* Include bytes carried over from the last set_thres() call. */
	bytes = hwmon->get_bytes_and_clear(hwmon);
	bytes += node->bytes;
	node->bytes = 0;

	mbps = bytes_to_mbps(bytes, us);
	/* Track the peak across micro samples within a decision window. */
	node->max_mbps = max(node->max_mbps, mbps);

	/*
	 * If the measured bandwidth in a micro sample is greater than the
	 * wake up threshold, it indicates an increase in load that's non
	 * trivial. So, have the governor ignore historical idle time or low
	 * bandwidth usage and do the bandwidth calculation based on just
	 * this micro sample.
	 */
	if (mbps > node->hw->up_wake_mbps) {
		wake = UP_WAKE;
	} else if (mbps < node->hw->down_wake_mbps) {
		/* Only wake down after down_cnt consecutive low samples. */
		if (node->down_cnt)
			node->down_cnt--;
		if (node->down_cnt <= 0)
			wake = DOWN_WAKE;
	}

	node->prev_ts = ts;
	node->wake = wake;
	node->sampled = true;

	trace_bw_hwmon_meas(dev_name(df->dev.parent),
				mbps,
				us,
				wake);

	return wake;
}
241
/*
 * HW-sampled path: the hardware tracks the sample window itself, so the
 * measurement covers a fixed node->sample_ms window. Caller holds
 * irq_lock.
 */
static int __bw_hwmon_hw_sample_end(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	unsigned long bytes, mbps;
	int wake = 0;

	df = hwmon->df;
	node = df->data;

	/*
	 * If this read is in response to an IRQ, the HW monitor should
	 * return the measurement in the micro sample that triggered the IRQ.
	 * Otherwise, it should return the maximum measured value in any
	 * micro sample since the last time we called get_bytes_and_clear()
	 */
	bytes = hwmon->get_bytes_and_clear(hwmon);
	mbps = bytes_to_mbps(bytes, node->sample_ms * USEC_PER_MSEC);
	/* HW already reports the window max; no SW max tracking needed. */
	node->max_mbps = mbps;

	if (mbps > node->hw->up_wake_mbps)
		wake = UP_WAKE;
	else if (mbps < node->hw->down_wake_mbps)
		wake = DOWN_WAKE;

	node->wake = wake;
	node->sampled = true;

	trace_bw_hwmon_meas(dev_name(df->dev.parent),
				mbps,
				node->sample_ms * USEC_PER_MSEC,
				wake);

	/*
	 * Always report a completed sample; the wake decision is left in
	 * node->wake for the caller to consume — presumably the HW only
	 * raises events worth acting on (NOTE(review): confirm intent).
	 */
	return 1;
}
277
278static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon)
279{
280 if (hwmon->set_hw_events)
281 return __bw_hwmon_hw_sample_end(hwmon);
282 else
283 return __bw_hwmon_sw_sample_end(hwmon);
284}
285
Saravana Kannanb93a2752015-06-11 16:04:23 -0700286int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
287{
288 unsigned long flags;
289 int wake;
290
291 spin_lock_irqsave(&irq_lock, flags);
292 wake = __bw_hwmon_sample_end(hwmon);
293 spin_unlock_irqrestore(&irq_lock, flags);
294
295 return wake;
296}
297
298unsigned long to_mbps_zone(struct hwmon_node *node, unsigned long mbps)
299{
300 int i;
301
302 for (i = 0; i < NUM_MBPS_ZONES && node->mbps_zones[i]; i++)
303 if (node->mbps_zones[i] >= mbps)
304 return node->mbps_zones[i];
305
306 return node->hw->df->max_freq;
307}
308
#define MIN_MBPS	500UL
#define HIST_PEAK_TOL	60
/*
 * Core decision function: turn the measured bandwidth into a new
 * frequency/AB vote, and program the HW monitor's wake thresholds for
 * the next window. Returns the requested MBps.
 */
static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
					unsigned long *freq, unsigned long *ab)
{
	unsigned long meas_mbps, thres, flags, req_mbps, adj_mbps;
	unsigned long meas_mbps_zone;
	unsigned long hist_lo_tol, hyst_lo_tol;
	struct bw_hwmon *hw = node->hw;
	unsigned int new_bw, io_percent;
	ktime_t ts;
	unsigned int ms = 0;

	spin_lock_irqsave(&irq_lock, flags);

	/* For SW sampling, check whether the current window has expired. */
	if (!hw->set_hw_events) {
		ts = ktime_get();
		ms = ktime_to_ms(ktime_sub(ts, node->prev_ts));
	}
	/* Close the window unless an IRQ already sampled it early. */
	if (!node->sampled || ms >= node->sample_ms)
		__bw_hwmon_sample_end(node->hw);
	node->sampled = false;

	req_mbps = meas_mbps = node->max_mbps;
	node->max_mbps = 0;

	hist_lo_tol = (node->hist_max_mbps * HIST_PEAK_TOL) / 100;
	/* Remember historic peak in the past hist_mem decision windows. */
	if (meas_mbps > node->hist_max_mbps || !node->hist_mem) {
		/* If new max or no history */
		node->hist_max_mbps = meas_mbps;
		node->hist_mem = node->hist_memory;
	} else if (meas_mbps >= hist_lo_tol) {
		/*
		 * If subsequent peaks come close (within tolerance) to but
		 * less than the historic peak, then reset the history start,
		 * but not the peak value.
		 */
		node->hist_mem = node->hist_memory;
	} else {
		/* Count down history expiration. */
		if (node->hist_mem)
			node->hist_mem--;
	}

	/* Keep track of whether we are in low power mode consistently. */
	if (meas_mbps > node->low_power_ceil_mbps)
		node->above_low_power = node->low_power_delay;
	if (node->above_low_power)
		node->above_low_power--;

	/* Low-power mode uses a (typically lower) io_percent tunable. */
	if (node->above_low_power)
		io_percent = node->io_percent;
	else
		io_percent = node->low_power_io_percent;

	/*
	 * The AB value that corresponds to the lowest mbps zone greater than
	 * or equal to the "frequency" the current measurement will pick.
	 * This upper limit is useful for balancing out any prediction
	 * mechanisms to be power friendly.
	 */
	meas_mbps_zone = (meas_mbps * 100) / io_percent;
	meas_mbps_zone = to_mbps_zone(node, meas_mbps_zone);
	meas_mbps_zone = (meas_mbps_zone * io_percent) / 100;
	meas_mbps_zone = max(meas_mbps, meas_mbps_zone);

	/*
	 * If this is a wake up due to BW increase, vote much higher BW than
	 * what we measure to stay ahead of increasing traffic and then set
	 * it up to vote for measured BW if we see down_count short sample
	 * windows of low traffic.
	 */
	if (node->wake == UP_WAKE) {
		req_mbps += ((meas_mbps - node->prev_req)
				* node->up_scale) / 100;
		/*
		 * However if the measured load is less than the historic
		 * peak, but the over request is higher than the historic
		 * peak, then we could limit the over requesting to the
		 * historic peak.
		 */
		if (req_mbps > node->hist_max_mbps
		    && meas_mbps < node->hist_max_mbps)
			req_mbps = node->hist_max_mbps;

		req_mbps = min(req_mbps, meas_mbps_zone);
	}

	/* Hysteresis: hold the vote up while repeated peaks are seen. */
	hyst_lo_tol = (node->hyst_mbps * HIST_PEAK_TOL) / 100;
	if (meas_mbps > node->hyst_mbps && meas_mbps > MIN_MBPS) {
		hyst_lo_tol = (meas_mbps * HIST_PEAK_TOL) / 100;
		node->hyst_peak = 0;
		node->hyst_trig_win = node->hyst_length;
		node->hyst_mbps = meas_mbps;
	}

	/*
	 * Check node->max_mbps to avoid double counting peaks that cause
	 * early termination of a window.
	 */
	if (meas_mbps >= hyst_lo_tol && meas_mbps > MIN_MBPS
	    && !node->max_mbps) {
		node->hyst_peak++;
		if (node->hyst_peak >= node->hyst_trigger_count
		    || node->hyst_en)
			node->hyst_en = node->hyst_length;
	}

	if (node->hyst_trig_win)
		node->hyst_trig_win--;
	if (node->hyst_en)
		node->hyst_en--;

	/* Both windows expired: forget the hysteresis peak entirely. */
	if (!node->hyst_trig_win && !node->hyst_en) {
		node->hyst_peak = 0;
		node->hyst_mbps = 0;
	}

	if (node->hyst_en) {
		if (meas_mbps > node->idle_mbps)
			req_mbps = max(req_mbps, node->hyst_mbps);
	}

	/* Stretch the short sample window size, if the traffic is too low */
	if (meas_mbps < MIN_MBPS) {
		hw->up_wake_mbps = (max(MIN_MBPS, req_mbps)
					* (100 + node->up_thres)) / 100;
		hw->down_wake_mbps = 0;
		hw->undo_over_req_mbps = 0;
		thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2),
					node->sample_ms);
	} else {
		/*
		 * Up wake vs down wake are intentionally a percentage of
		 * req_mbps vs meas_mbps to make sure the over requesting
		 * phase is handled properly. We only want to wake up and
		 * reduce the vote based on the measured mbps being less than
		 * the previous measurement that caused the "over request".
		 */
		hw->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100;
		hw->down_wake_mbps = (meas_mbps * node->down_thres) / 100;
		if (node->wake == UP_WAKE)
			hw->undo_over_req_mbps = min(req_mbps, meas_mbps_zone);
		else
			hw->undo_over_req_mbps = 0;
		thres = mbps_to_bytes(meas_mbps, node->sample_ms);
	}

	/* Arm the next window in HW, or program the SW byte threshold. */
	if (hw->set_hw_events) {
		hw->down_cnt = node->down_count;
		hw->set_hw_events(hw, node->sample_ms);
	} else {
		node->down_cnt = node->down_count;
		node->bytes = hw->set_thres(hw, thres);
	}

	node->wake = 0;
	node->prev_req = req_mbps;

	spin_unlock_irqrestore(&irq_lock, flags);

	adj_mbps = req_mbps + node->guard_band_mbps;

	/* Rise immediately; decay gradually per the decay_rate tunable. */
	if (adj_mbps > node->prev_ab) {
		new_bw = adj_mbps;
	} else {
		new_bw = adj_mbps * node->decay_rate
			+ node->prev_ab * (100 - node->decay_rate);
		new_bw /= 100;
	}

	node->prev_ab = new_bw;
	if (ab)
		*ab = roundup(new_bw, node->bw_step);

	*freq = (new_bw * 100) / io_percent;
	trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
				new_bw,
				*freq,
				hw->up_wake_mbps,
				hw->down_wake_mbps);
	return req_mbps;
}
493
494static struct hwmon_node *find_hwmon_node(struct devfreq *df)
495{
496 struct hwmon_node *node, *found = NULL;
497
498 mutex_lock(&list_lock);
499 list_for_each_entry(node, &hwmon_list, list)
500 if (node->hw->dev == df->dev.parent ||
501 node->hw->of_node == df->dev.parent->of_node ||
502 (!node->hw->dev && !node->hw->of_node &&
503 node->gov == df->governor)) {
504 found = node;
505 break;
506 }
507 mutex_unlock(&list_lock);
508
509 return found;
510}
511
/*
 * Called by HW monitor drivers to request an immediate re-evaluation
 * of the frequency vote (e.g. from a threshold IRQ).
 */
int update_bw_hwmon(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	int ret;

	if (!hwmon)
		return -EINVAL;
	df = hwmon->df;
	if (!df)
		return -ENODEV;
	node = df->data;
	if (!node)
		return -ENODEV;

	/* Ignore requests while the governor is stopped or suspended. */
	if (!node->mon_started)
		return -EBUSY;

	dev_dbg(df->dev.parent, "Got update request\n");
	/* Pause polling so it doesn't race with this forced update. */
	devfreq_monitor_stop(df);

	mutex_lock(&df->lock);
	ret = update_devfreq(df);
	if (ret)
		dev_err(df->dev.parent,
			"Unable to update freq on request!\n");
	mutex_unlock(&df->lock);

	devfreq_monitor_start(df);

	return 0;
}
544
/*
 * Starts (init == true) or resumes (init == false) the HW monitor and
 * the devfreq polling for this device. Returns 0 or a negative errno.
 */
static int start_monitor(struct devfreq *df, bool init)
{
	struct hwmon_node *node = df->data;
	struct bw_hwmon *hw = node->hw;
	struct device *dev = df->dev.parent;
	unsigned long mbps;
	int ret;

	node->prev_ts = ktime_get();

	if (init) {
		/* Fresh start: clear vote history and seed the thresholds
		 * from the current frequency. */
		node->prev_ab = 0;
		node->resume_freq = 0;
		node->resume_ab = 0;
		mbps = (df->previous_freq * node->io_percent) / 100;
		hw->up_wake_mbps = mbps;
		hw->down_wake_mbps = MIN_MBPS;
		hw->undo_over_req_mbps = 0;
		ret = hw->start_hwmon(hw, mbps);
	} else {
		ret = hw->resume_hwmon(hw);
	}

	if (ret) {
		dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
		return ret;
	}

	if (init)
		devfreq_monitor_start(df);
	else
		devfreq_monitor_resume(df);

	/* Allow update_bw_hwmon()/get_freq to act from this point on. */
	node->mon_started = true;

	return 0;
}
582
583static void stop_monitor(struct devfreq *df, bool init)
584{
585 struct hwmon_node *node = df->data;
586 struct bw_hwmon *hw = node->hw;
587
588 node->mon_started = false;
589
590 if (init) {
591 devfreq_monitor_stop(df);
592 hw->stop_hwmon(hw);
593 } else {
594 devfreq_monitor_suspend(df);
595 hw->suspend_hwmon(hw);
596 }
597
598}
599
600static int gov_start(struct devfreq *df)
Saravana Kannanedad3012013-09-23 19:27:57 -0700601{
602 int ret = 0;
Saravana Kannanedad3012013-09-23 19:27:57 -0700603 struct device *dev = df->dev.parent;
604 struct hwmon_node *node;
605 struct bw_hwmon *hw;
606 struct devfreq_dev_status stat;
607
608 node = find_hwmon_node(df);
609 if (!node) {
610 dev_err(dev, "Unable to find HW monitor!\n");
611 return -ENODEV;
612 }
613 hw = node->hw;
614
615 stat.private_data = NULL;
616 if (df->profile->get_dev_status)
617 ret = df->profile->get_dev_status(df->dev.parent, &stat);
618 if (ret || !stat.private_data)
619 dev_warn(dev, "Device doesn't take AB votes!\n");
620 else
621 node->dev_ab = stat.private_data;
622
623 hw->df = df;
624 node->orig_data = df->data;
625 df->data = node;
626
Saravana Kannancddae1b2014-08-07 19:38:02 -0700627 if (start_monitor(df, true))
Saravana Kannanedad3012013-09-23 19:27:57 -0700628 goto err_start;
Saravana Kannanedad3012013-09-23 19:27:57 -0700629
630 ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
631 if (ret)
632 goto err_sysfs;
633
634 return 0;
635
636err_sysfs:
Saravana Kannancddae1b2014-08-07 19:38:02 -0700637 stop_monitor(df, true);
Saravana Kannanedad3012013-09-23 19:27:57 -0700638err_start:
639 df->data = node->orig_data;
640 node->orig_data = NULL;
641 hw->df = NULL;
642 node->dev_ab = NULL;
643 return ret;
644}
645
/* Governor stop: undo everything gov_start() set up. */
static void gov_stop(struct devfreq *df)
{
	struct hwmon_node *node = df->data;
	struct bw_hwmon *hw = node->hw;

	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
	stop_monitor(df, true);
	df->data = node->orig_data;
	node->orig_data = NULL;
	hw->df = NULL;
	/*
	 * Not all governors know about this additional extended device
	 * configuration. To avoid leaving the extended configuration at a
	 * stale state, set it to 0 and let the next governor take it from
	 * there.
	 */
	if (node->dev_ab)
		*node->dev_ab = 0;
	node->dev_ab = NULL;
}
666
Saravana Kannancddae1b2014-08-07 19:38:02 -0700667static int gov_suspend(struct devfreq *df)
668{
669 struct hwmon_node *node = df->data;
670 unsigned long resume_freq = df->previous_freq;
671 unsigned long resume_ab = *node->dev_ab;
672
673 if (!node->hw->suspend_hwmon)
674 return -ENOSYS;
675
676 if (node->resume_freq) {
677 dev_warn(df->dev.parent, "Governor already suspended!\n");
678 return -EBUSY;
679 }
680
681 stop_monitor(df, false);
682
683 mutex_lock(&df->lock);
684 update_devfreq(df);
685 mutex_unlock(&df->lock);
686
687 node->resume_freq = resume_freq;
688 node->resume_ab = resume_ab;
689
690 return 0;
691}
692
/* Governor resume: restore the saved vote and restart the HW monitor. */
static int gov_resume(struct devfreq *df)
{
	struct hwmon_node *node = df->data;

	if (!node->hw->resume_hwmon)
		return -ENOSYS;

	/* resume_freq == 0 means we were never suspended. */
	if (!node->resume_freq) {
		dev_warn(df->dev.parent, "Governor already resumed!\n");
		return -EBUSY;
	}

	/* get_freq() still returns the saved values at this point. */
	mutex_lock(&df->lock);
	update_devfreq(df);
	mutex_unlock(&df->lock);

	node->resume_freq = 0;
	node->resume_ab = 0;

	return start_monitor(df, false);
}
714
Saravana Kannanedad3012013-09-23 19:27:57 -0700715static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
716 unsigned long *freq)
717{
Saravana Kannanedad3012013-09-23 19:27:57 -0700718 struct hwmon_node *node = df->data;
719
Saravana Kannancddae1b2014-08-07 19:38:02 -0700720 /* Suspend/resume sequence */
721 if (!node->mon_started) {
722 *freq = node->resume_freq;
723 *node->dev_ab = node->resume_ab;
724 return 0;
725 }
726
Saravana Kannanb93a2752015-06-11 16:04:23 -0700727 get_bw_and_set_irq(node, freq, node->dev_ab);
Saravana Kannanedad3012013-09-23 19:27:57 -0700728
729 return 0;
730}
731
Rohit Gupta4d1f4f42015-05-08 12:04:56 -0700732static ssize_t store_throttle_adj(struct device *dev,
733 struct device_attribute *attr, const char *buf, size_t count)
734{
735 struct devfreq *df = to_devfreq(dev);
736 struct hwmon_node *node = df->data;
737 int ret;
738 unsigned int val;
739
740 if (!node->hw->set_throttle_adj)
741 return -ENOSYS;
742
743 ret = kstrtouint(buf, 10, &val);
744 if (ret)
745 return ret;
746
747 ret = node->hw->set_throttle_adj(node->hw, val);
748
749 if (!ret)
750 return count;
751 else
752 return ret;
753}
754
755static ssize_t show_throttle_adj(struct device *dev,
756 struct device_attribute *attr, char *buf)
757{
758 struct devfreq *df = to_devfreq(dev);
759 struct hwmon_node *node = df->data;
760 unsigned int val;
761
762 if (!node->hw->get_throttle_adj)
763 val = 0;
764 else
765 val = node->hw->get_throttle_adj(node->hw);
766
767 return snprintf(buf, PAGE_SIZE, "%u\n", val);
768}
769
static DEVICE_ATTR(throttle_adj, 0644, show_throttle_adj,
		store_throttle_adj);

/* Tunables: gov_attr(name, min, max) generates the sysfs handlers. */
gov_attr(guard_band_mbps, 0U, 2000U);
gov_attr(decay_rate, 0U, 100U);
gov_attr(io_percent, 1U, 100U);
gov_attr(bw_step, 50U, 1000U);
gov_attr(sample_ms, 1U, 50U);
gov_attr(up_scale, 0U, 500U);
gov_attr(up_thres, 1U, 100U);
gov_attr(down_thres, 0U, 90U);
gov_attr(down_count, 0U, 90U);
gov_attr(hist_memory, 0U, 90U);
gov_attr(hyst_trigger_count, 0U, 90U);
gov_attr(hyst_length, 0U, 90U);
gov_attr(idle_mbps, 0U, 2000U);
gov_attr(low_power_ceil_mbps, 0U, 2500U);
gov_attr(low_power_io_percent, 1U, 100U);
gov_attr(low_power_delay, 1U, 60U);
gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);

/* Attribute set exposed under the governor's sysfs group. */
static struct attribute *dev_attr[] = {
	&dev_attr_guard_band_mbps.attr,
	&dev_attr_decay_rate.attr,
	&dev_attr_io_percent.attr,
	&dev_attr_bw_step.attr,
	&dev_attr_sample_ms.attr,
	&dev_attr_up_scale.attr,
	&dev_attr_up_thres.attr,
	&dev_attr_down_thres.attr,
	&dev_attr_down_count.attr,
	&dev_attr_hist_memory.attr,
	&dev_attr_hyst_trigger_count.attr,
	&dev_attr_hyst_length.attr,
	&dev_attr_idle_mbps.attr,
	&dev_attr_low_power_ceil_mbps.attr,
	&dev_attr_low_power_io_percent.attr,
	&dev_attr_low_power_delay.attr,
	&dev_attr_mbps_zones.attr,
	&dev_attr_throttle_adj.attr,
	NULL,
};

static struct attribute_group dev_attr_group = {
	.name = "bw_hwmon",
	.attrs = dev_attr,
};
817
818static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
819 unsigned int event, void *data)
820{
821 int ret;
822 unsigned int sample_ms;
Saravana Kannan3536bd32016-02-18 18:28:29 -0800823 struct hwmon_node *node;
824 struct bw_hwmon *hw;
Saravana Kannanedad3012013-09-23 19:27:57 -0700825
826 switch (event) {
827 case DEVFREQ_GOV_START:
828 sample_ms = df->profile->polling_ms;
829 sample_ms = max(MIN_MS, sample_ms);
830 sample_ms = min(MAX_MS, sample_ms);
831 df->profile->polling_ms = sample_ms;
832
Saravana Kannancddae1b2014-08-07 19:38:02 -0700833 ret = gov_start(df);
Saravana Kannanedad3012013-09-23 19:27:57 -0700834 if (ret)
835 return ret;
836
837 dev_dbg(df->dev.parent,
838 "Enabled dev BW HW monitor governor\n");
839 break;
840
841 case DEVFREQ_GOV_STOP:
Saravana Kannancddae1b2014-08-07 19:38:02 -0700842 gov_stop(df);
Saravana Kannanedad3012013-09-23 19:27:57 -0700843 dev_dbg(df->dev.parent,
844 "Disabled dev BW HW monitor governor\n");
845 break;
846
847 case DEVFREQ_GOV_INTERVAL:
848 sample_ms = *(unsigned int *)data;
849 sample_ms = max(MIN_MS, sample_ms);
850 sample_ms = min(MAX_MS, sample_ms);
Saravana Kannan3536bd32016-02-18 18:28:29 -0800851 /*
852 * Suspend/resume the HW monitor around the interval update
853 * to prevent the HW monitor IRQ from trying to change
854 * stop/start the delayed workqueue while the interval update
855 * is happening.
856 */
857 node = df->data;
858 hw = node->hw;
859 hw->suspend_hwmon(hw);
Saravana Kannanedad3012013-09-23 19:27:57 -0700860 devfreq_interval_update(df, &sample_ms);
Saravana Kannan3536bd32016-02-18 18:28:29 -0800861 ret = hw->resume_hwmon(hw);
862 if (ret) {
863 dev_err(df->dev.parent,
864 "Unable to resume HW monitor (%d)\n", ret);
865 return ret;
866 }
Saravana Kannanedad3012013-09-23 19:27:57 -0700867 break;
Saravana Kannancddae1b2014-08-07 19:38:02 -0700868
869 case DEVFREQ_GOV_SUSPEND:
870 ret = gov_suspend(df);
871 if (ret) {
872 dev_err(df->dev.parent,
873 "Unable to suspend BW HW mon governor (%d)\n",
874 ret);
875 return ret;
876 }
877
878 dev_dbg(df->dev.parent, "Suspended BW HW mon governor\n");
879 break;
880
881 case DEVFREQ_GOV_RESUME:
882 ret = gov_resume(df);
883 if (ret) {
884 dev_err(df->dev.parent,
885 "Unable to resume BW HW mon governor (%d)\n",
886 ret);
887 return ret;
888 }
889
890 dev_dbg(df->dev.parent, "Resumed BW HW mon governor\n");
891 break;
Saravana Kannanedad3012013-09-23 19:27:57 -0700892 }
893
894 return 0;
895}
896
/* Default governor instance used when a monitor has no custom governor. */
static struct devfreq_governor devfreq_gov_bw_hwmon = {
	.name = "bw_hwmon",
	.get_target_freq = devfreq_bw_hwmon_get_freq,
	.event_handler = devfreq_bw_hwmon_ev_handler,
};
902
/*
 * Registers a HW monitor with this governor. The monitor must identify
 * its target device via ->dev, ->of_node, or by providing its own
 * governor (->gov). Allocates a hwmon_node with default tunables, adds
 * it to the global list, and registers the (shared or custom) governor.
 * Returns 0 on success or a negative errno.
 */
int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
{
	int ret = 0;
	struct hwmon_node *node;
	struct attribute_group *attr_grp;

	if (!hwmon->gov && !hwmon->dev && !hwmon->of_node)
		return -EINVAL;

	/* devm allocation: freed automatically with the backing device. */
	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	if (hwmon->gov) {
		/* Custom governor: give it our hooks and its own group. */
		attr_grp = devm_kzalloc(dev, sizeof(*attr_grp), GFP_KERNEL);
		if (!attr_grp)
			return -ENOMEM;

		hwmon->gov->get_target_freq = devfreq_bw_hwmon_get_freq;
		hwmon->gov->event_handler = devfreq_bw_hwmon_ev_handler;
		attr_grp->name = hwmon->gov->name;
		attr_grp->attrs = dev_attr;

		node->gov = hwmon->gov;
		node->attr_grp = attr_grp;
	} else {
		node->gov = &devfreq_gov_bw_hwmon;
		node->attr_grp = &dev_attr_group;
	}

	/* Default tunable values; adjustable later via sysfs. */
	node->guard_band_mbps = 100;
	node->decay_rate = 90;
	node->io_percent = 16;
	node->low_power_ceil_mbps = 0;
	node->low_power_io_percent = 16;
	node->low_power_delay = 60;
	node->bw_step = 190;
	node->sample_ms = 50;
	node->up_scale = 0;
	node->up_thres = 10;
	node->down_thres = 0;
	node->down_count = 3;
	node->hist_memory = 0;
	node->hyst_trigger_count = 3;
	node->hyst_length = 0;
	node->idle_mbps = 400;
	node->mbps_zones[0] = 0;
	node->hw = hwmon;

	mutex_lock(&list_lock);
	list_add_tail(&node->list, &hwmon_list);
	mutex_unlock(&list_lock);

	if (hwmon->gov) {
		ret = devfreq_add_governor(hwmon->gov);
	} else {
		/* Shared governor is registered once, refcounted. */
		mutex_lock(&state_lock);
		if (!use_cnt)
			ret = devfreq_add_governor(&devfreq_gov_bw_hwmon);
		if (!ret)
			use_cnt++;
		mutex_unlock(&state_lock);
	}

	if (!ret)
		dev_info(dev, "BW HWmon governor registered.\n");
	else
		dev_err(dev, "BW HWmon governor registration failed!\n");

	return ret;
}
974
975MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
976MODULE_LICENSE("GPL v2");