blob: 3026bc22b374ad63ec98dd00cee43a475a4e0fb7 [file] [log] [blame]
Saravana Kannanedad3012013-09-23 19:27:57 -07001/*
Rohit Guptafe241a02017-04-26 18:46:50 -07002 * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
Saravana Kannanedad3012013-09-23 19:27:57 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#define pr_fmt(fmt) "bw-hwmon: " fmt
15
16#include <linux/kernel.h>
17#include <linux/sizes.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/delay.h>
22#include <linux/ktime.h>
23#include <linux/time.h>
24#include <linux/err.h>
25#include <linux/errno.h>
26#include <linux/mutex.h>
27#include <linux/interrupt.h>
Saravana Kannanb93a2752015-06-11 16:04:23 -070028#include <linux/spinlock.h>
Saravana Kannanedad3012013-09-23 19:27:57 -070029#include <linux/platform_device.h>
30#include <linux/of.h>
31#include <linux/devfreq.h>
32#include <trace/events/power.h>
33#include "governor.h"
34#include "governor_bw_hwmon.h"
35
/* Maximum number of discrete bandwidth "zones" a device can declare. */
#define NUM_MBPS_ZONES		10

/*
 * Per-device governor state. One node exists per registered bw_hwmon
 * instance; it holds both the sysfs-tunable parameters and the runtime
 * bookkeeping used by the bandwidth decision algorithm.
 */
struct hwmon_node {
	/* --- sysfs tunables (see gov_attr() instantiations below) --- */
	unsigned int guard_band_mbps;	/* margin added on top of the request */
	unsigned int decay_rate;	/* % weight of new vote when decaying */
	unsigned int io_percent;	/* assumed bus utilization percentage */
	unsigned int bw_step;		/* granularity (MBps) of AB votes */
	unsigned int sample_ms;		/* short sampling window length */
	unsigned int up_scale;		/* % of delta to over-request on ramp up */
	unsigned int up_thres;		/* % above request that triggers UP_WAKE */
	unsigned int down_thres;	/* % of measurement that triggers DOWN_WAKE */
	unsigned int down_count;	/* low windows needed before voting down */
	unsigned int hist_memory;	/* windows to remember the historic peak */
	unsigned int hyst_trigger_count; /* peaks needed to arm hysteresis */
	unsigned int hyst_length;	/* windows hysteresis stays armed */
	unsigned int idle_mbps;		/* below this, hysteresis is ignored */
	unsigned int mbps_zones[NUM_MBPS_ZONES]; /* 0-terminated zone list */

	/* --- runtime state --- */
	unsigned long prev_ab;		/* last AB (average bandwidth) vote */
	unsigned long *dev_ab;		/* device AB vote slot; may be NULL */
	unsigned long resume_freq;	/* freq to restore on governor resume */
	unsigned long resume_ab;	/* AB to restore on governor resume */
	unsigned long bytes;		/* leftover byte count between samples */
	unsigned long max_mbps;		/* peak MBps seen in current window */
	unsigned long hist_max_mbps;	/* historic peak within hist_mem windows */
	unsigned long hist_mem;		/* windows left before history expires */
	unsigned long hyst_peak;	/* peaks counted toward hysteresis */
	unsigned long hyst_mbps;	/* bandwidth held by hysteresis */
	unsigned long hyst_trig_win;	/* windows left in the trigger window */
	unsigned long hyst_en;		/* windows left with hysteresis active */
	unsigned long prev_req;		/* previous requested MBps */
	unsigned int wake;		/* 0, UP_WAKE or DOWN_WAKE */
	unsigned int down_cnt;		/* SW-mode countdown of low samples */
	ktime_t prev_ts;		/* timestamp of last micro sample */
	ktime_t hist_max_ts;		/* timestamp of historic peak */
	bool sampled;			/* a micro sample ended this window */
	bool mon_started;		/* HW monitor currently running */
	struct list_head list;		/* entry in global hwmon_list */
	void *orig_data;		/* df->data saved across gov_start() */
	struct bw_hwmon *hw;		/* backing hardware monitor */
	struct devfreq_governor *gov;	/* governor this node serves */
	struct attribute_group *attr_grp; /* sysfs group published on start */
};
78
/* Wake reasons reported by the sample-end handlers. */
#define UP_WAKE		1	/* traffic rose above the up threshold */
#define DOWN_WAKE	2	/* traffic fell below the down threshold */
/* Protects sampling state shared with the HW monitor IRQ path. */
static DEFINE_SPINLOCK(irq_lock);

/* All registered hwmon nodes; guarded by list_lock. */
static LIST_HEAD(hwmon_list);
static DEFINE_MUTEX(list_lock);

/* Reference count for the shared governor registration; under state_lock. */
static int use_cnt;
static DEFINE_MUTEX(state_lock);
88
/*
 * Generate a sysfs "show" handler that prints one unsigned tunable from
 * the hwmon_node attached to the devfreq device.
 */
#define show_attr(name) \
static ssize_t show_##name(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name); \
}

/*
 * Generate a sysfs "store" handler that parses an unsigned integer and
 * clamps it to [_min, _max] before updating the tunable.
 *
 * Use kstrtouint(): the destination is unsigned int, so kstrtoint() both
 * mismatches the pointer type and silently accepts negative input (which
 * would wrap to a huge value before clamping). kstrtouint() rejects
 * negative strings outright.
 */
#define store_attr(name, _min, _max) \
static ssize_t store_##name(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	int ret; \
	unsigned int val; \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) \
		return ret; \
	val = max(val, _min); \
	val = min(val, _max); \
	hw->name = val; \
	return count; \
}

/* Declare show/store handlers and the DEVICE_ATTR for one tunable. */
#define gov_attr(__attr, min, max) \
show_attr(__attr) \
store_attr(__attr, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
120
/*
 * Generate a sysfs "show" handler for a zero-terminated list tunable
 * (e.g. mbps_zones). Values are printed space separated on one line.
 *
 * Fix: each snprintf() must be bounded by the space REMAINING in the
 * page (PAGE_SIZE - cnt), not the full PAGE_SIZE, or a long list could
 * write past the sysfs buffer.
 */
#define show_list_attr(name, n) \
static ssize_t show_list_##name(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	unsigned int i, cnt = 0; \
	\
	for (i = 0; i < n && hw->name[i]; i++) \
		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u ", \
				hw->name[i]); \
	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n"); \
	return cnt; \
}

/*
 * Generate a sysfs "store" handler for a list tunable. Input is split on
 * whitespace; each element is parsed, clamped to [_min, _max] and stored.
 * The stored list is always zero-terminated, even on a parse error part
 * way through (the partial prefix is kept).
 */
#define store_list_attr(name, n, _min, _max) \
static ssize_t store_list_##name(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct hwmon_node *hw = df->data; \
	int ret, numvals; \
	unsigned int i = 0, val; \
	char **strlist; \
	\
	strlist = argv_split(GFP_KERNEL, buf, &numvals); \
	if (!strlist) \
		return -ENOMEM; \
	numvals = min(numvals, n - 1); \
	for (i = 0; i < numvals; i++) { \
		ret = kstrtouint(strlist[i], 10, &val); \
		if (ret) \
			goto out; \
		val = max(val, _min); \
		val = min(val, _max); \
		hw->name[i] = val; \
	} \
	ret = count; \
out: \
	argv_free(strlist); \
	hw->name[i] = 0; \
	return ret; \
}

/* Declare list show/store handlers and the DEVICE_ATTR for one tunable. */
#define gov_list_attr(__attr, n, min, max) \
show_list_attr(__attr, n) \
store_list_attr(__attr, n, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_list_##__attr, store_list_##__attr)
169
/* Allowed range for the devfreq polling interval. */
#define MIN_MS	10U
#define MAX_MS	500U

/* Returns MBps of read/writes for the sampling window. */
static unsigned int bytes_to_mbps(long long bytes, unsigned int us)
{
	/* bytes/us -> bytes/s, then round up to whole MBps. */
	bytes *= USEC_PER_SEC;
	do_div(bytes, us);
	bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
	return bytes;
}

/* Converts a MBps rate into the byte count expected over "ms" milliseconds. */
static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms)
{
	mbps *= ms;
	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
	mbps *= SZ_1M;
	return mbps;
}
189
/*
 * End a software-timed micro sample: read and clear the HW byte counter,
 * convert to MBps over the elapsed time, track the window peak and decide
 * whether this sample should wake the governor (UP_WAKE/DOWN_WAKE/0).
 * Caller must hold irq_lock.
 */
static int __bw_hwmon_sw_sample_end(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	ktime_t ts;
	unsigned long bytes, mbps;
	unsigned int us;
	int wake = 0;

	df = hwmon->df;
	node = df->data;

	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, node->prev_ts));

	/* Include bytes left over from threshold programming (set_thres). */
	bytes = hwmon->get_bytes_and_clear(hwmon);
	bytes += node->bytes;
	node->bytes = 0;

	mbps = bytes_to_mbps(bytes, us);
	node->max_mbps = max(node->max_mbps, mbps);

	/*
	 * If the measured bandwidth in a micro sample is greater than the
	 * wake up threshold, it indicates an increase in load that's non
	 * trivial. So, have the governor ignore historical idle time or low
	 * bandwidth usage and do the bandwidth calculation based on just
	 * this micro sample.
	 */
	if (mbps > node->hw->up_wake_mbps) {
		wake = UP_WAKE;
	} else if (mbps < node->hw->down_wake_mbps) {
		/* Only wake down after down_cnt consecutive low samples. */
		if (node->down_cnt)
			node->down_cnt--;
		if (node->down_cnt <= 0)
			wake = DOWN_WAKE;
	}

	node->prev_ts = ts;
	node->wake = wake;
	node->sampled = true;

	trace_bw_hwmon_meas(dev_name(df->dev.parent),
				mbps,
				us,
				wake);

	return wake;
}
239
/*
 * End a hardware-timed sample for monitors that implement set_hw_events:
 * the hardware itself windows the measurement, so no timestamp math is
 * needed. Always returns 1 (a HW-reported sample is always meaningful).
 * Caller must hold irq_lock.
 */
static int __bw_hwmon_hw_sample_end(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	unsigned long bytes, mbps;
	int wake = 0;

	df = hwmon->df;
	node = df->data;

	/*
	 * If this read is in response to an IRQ, the HW monitor should
	 * return the measurement in the micro sample that triggered the IRQ.
	 * Otherwise, it should return the maximum measured value in any
	 * micro sample since the last time we called get_bytes_and_clear()
	 */
	bytes = hwmon->get_bytes_and_clear(hwmon);
	mbps = bytes_to_mbps(bytes, node->sample_ms * USEC_PER_MSEC);
	node->max_mbps = mbps;

	if (mbps > node->hw->up_wake_mbps)
		wake = UP_WAKE;
	else if (mbps < node->hw->down_wake_mbps)
		wake = DOWN_WAKE;

	node->wake = wake;
	node->sampled = true;

	trace_bw_hwmon_meas(dev_name(df->dev.parent),
				mbps,
				node->sample_ms * USEC_PER_MSEC,
				wake);

	return 1;
}
275
276static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon)
277{
278 if (hwmon->set_hw_events)
279 return __bw_hwmon_hw_sample_end(hwmon);
280 else
281 return __bw_hwmon_sw_sample_end(hwmon);
282}
283
/*
 * IRQ-safe entry point for HW monitor drivers to report a sample-window
 * end. Returns the wake decision (0, UP_WAKE or DOWN_WAKE) so the driver
 * can decide whether to kick the governor.
 */
int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
{
	unsigned long flags;
	int wake;

	spin_lock_irqsave(&irq_lock, flags);
	wake = __bw_hwmon_sample_end(hwmon);
	spin_unlock_irqrestore(&irq_lock, flags);

	return wake;
}
295
/*
 * Round "mbps" up to the lowest configured zone that can contain it.
 * The mbps_zones list is ascending and zero-terminated; if no zone is
 * large enough (or none are configured), fall back to the device's
 * maximum frequency.
 */
unsigned long to_mbps_zone(struct hwmon_node *node, unsigned long mbps)
{
	int i;

	for (i = 0; i < NUM_MBPS_ZONES && node->mbps_zones[i]; i++)
		if (node->mbps_zones[i] >= mbps)
			return node->mbps_zones[i];

	return node->hw->df->max_freq;
}
306
/* Floor below which traffic is treated as idle. */
#define MIN_MBPS	500UL
/* Percent tolerance used for both history and hysteresis peak matching. */
#define HIST_PEAK_TOL	60
/*
 * Core decision function: finish any pending sample, run the history,
 * zone and hysteresis heuristics to pick the bandwidth to request,
 * reprogram the HW monitor thresholds for the next window, and convert
 * the request into a frequency (*freq) and, if supported, an AB vote
 * (*ab). Returns the requested MBps.
 */
static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
					unsigned long *freq, unsigned long *ab)
{
	unsigned long meas_mbps, thres, flags, req_mbps, adj_mbps;
	unsigned long meas_mbps_zone;
	unsigned long hist_lo_tol, hyst_lo_tol;
	struct bw_hwmon *hw = node->hw;
	unsigned int new_bw, io_percent = node->io_percent;
	ktime_t ts;
	unsigned int ms = 0;

	spin_lock_irqsave(&irq_lock, flags);

	/* SW-timed monitors need the elapsed time to decide staleness. */
	if (!hw->set_hw_events) {
		ts = ktime_get();
		ms = ktime_to_ms(ktime_sub(ts, node->prev_ts));
	}
	/* Close the window if nothing sampled yet or the sample is stale. */
	if (!node->sampled || ms >= node->sample_ms)
		__bw_hwmon_sample_end(node->hw);
	node->sampled = false;

	req_mbps = meas_mbps = node->max_mbps;
	node->max_mbps = 0;

	hist_lo_tol = (node->hist_max_mbps * HIST_PEAK_TOL) / 100;
	/* Remember historic peak in the past hist_mem decision windows. */
	if (meas_mbps > node->hist_max_mbps || !node->hist_mem) {
		/* If new max or no history */
		node->hist_max_mbps = meas_mbps;
		node->hist_mem = node->hist_memory;
	} else if (meas_mbps >= hist_lo_tol) {
		/*
		 * If subsequent peaks come close (within tolerance) to but
		 * less than the historic peak, then reset the history start,
		 * but not the peak value.
		 */
		node->hist_mem = node->hist_memory;
	} else {
		/* Count down history expiration. */
		if (node->hist_mem)
			node->hist_mem--;
	}

	/*
	 * The AB value that corresponds to the lowest mbps zone greater than
	 * or equal to the "frequency" the current measurement will pick.
	 * This upper limit is useful for balancing out any prediction
	 * mechanisms to be power friendly.
	 */
	meas_mbps_zone = (meas_mbps * 100) / io_percent;
	meas_mbps_zone = to_mbps_zone(node, meas_mbps_zone);
	meas_mbps_zone = (meas_mbps_zone * io_percent) / 100;
	meas_mbps_zone = max(meas_mbps, meas_mbps_zone);

	/*
	 * If this is a wake up due to BW increase, vote much higher BW than
	 * what we measure to stay ahead of increasing traffic and then set
	 * it up to vote for measured BW if we see down_count short sample
	 * windows of low traffic.
	 */
	if (node->wake == UP_WAKE) {
		req_mbps += ((meas_mbps - node->prev_req)
				* node->up_scale) / 100;
		/*
		 * However if the measured load is less than the historic
		 * peak, but the over request is higher than the historic
		 * peak, then we could limit the over requesting to the
		 * historic peak.
		 */
		if (req_mbps > node->hist_max_mbps
		    && meas_mbps < node->hist_max_mbps)
			req_mbps = node->hist_max_mbps;

		req_mbps = min(req_mbps, meas_mbps_zone);
	}

	/* Hysteresis: arm on a new sufficiently-large peak. */
	hyst_lo_tol = (node->hyst_mbps * HIST_PEAK_TOL) / 100;
	if (meas_mbps > node->hyst_mbps && meas_mbps > MIN_MBPS) {
		hyst_lo_tol = (meas_mbps * HIST_PEAK_TOL) / 100;
		node->hyst_peak = 0;
		node->hyst_trig_win = node->hyst_length;
		node->hyst_mbps = meas_mbps;
	}

	/*
	 * Check node->max_mbps to avoid double counting peaks that cause
	 * early termination of a window.
	 */
	if (meas_mbps >= hyst_lo_tol && meas_mbps > MIN_MBPS
	    && !node->max_mbps) {
		node->hyst_peak++;
		if (node->hyst_peak >= node->hyst_trigger_count
		    || node->hyst_en)
			node->hyst_en = node->hyst_length;
	}

	if (node->hyst_trig_win)
		node->hyst_trig_win--;
	if (node->hyst_en)
		node->hyst_en--;

	/* Both timers expired: drop all hysteresis state. */
	if (!node->hyst_trig_win && !node->hyst_en) {
		node->hyst_peak = 0;
		node->hyst_mbps = 0;
	}

	/* While armed (and not idle), never vote below the hysteresis BW. */
	if (node->hyst_en) {
		if (meas_mbps > node->idle_mbps)
			req_mbps = max(req_mbps, node->hyst_mbps);
	}

	/* Stretch the short sample window size, if the traffic is too low */
	if (meas_mbps < MIN_MBPS) {
		hw->up_wake_mbps = (max(MIN_MBPS, req_mbps)
					* (100 + node->up_thres)) / 100;
		hw->down_wake_mbps = 0;
		hw->undo_over_req_mbps = 0;
		thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2),
					node->sample_ms);
	} else {
		/*
		 * Up wake vs down wake are intentionally a percentage of
		 * req_mbps vs meas_mbps to make sure the over requesting
		 * phase is handled properly. We only want to wake up and
		 * reduce the vote based on the measured mbps being less than
		 * the previous measurement that caused the "over request".
		 */
		hw->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100;
		hw->down_wake_mbps = (meas_mbps * node->down_thres) / 100;
		if (node->wake == UP_WAKE)
			hw->undo_over_req_mbps = min(req_mbps, meas_mbps_zone);
		else
			hw->undo_over_req_mbps = 0;
		thres = mbps_to_bytes(meas_mbps, node->sample_ms);
	}

	/* Program the next window: HW event monitors vs byte-threshold. */
	if (hw->set_hw_events) {
		hw->down_cnt = node->down_count;
		hw->set_hw_events(hw, node->sample_ms);
	} else {
		node->down_cnt = node->down_count;
		node->bytes = hw->set_thres(hw, thres);
	}

	node->wake = 0;
	node->prev_req = req_mbps;

	spin_unlock_irqrestore(&irq_lock, flags);

	adj_mbps = req_mbps + node->guard_band_mbps;

	/* Rise instantly, decay gradually (decay_rate% toward the new value). */
	if (adj_mbps > node->prev_ab) {
		new_bw = adj_mbps;
	} else {
		new_bw = adj_mbps * node->decay_rate
			+ node->prev_ab * (100 - node->decay_rate);
		new_bw /= 100;
	}

	node->prev_ab = new_bw;
	if (ab)
		*ab = roundup(new_bw, node->bw_step);

	/* Scale AB up by the assumed IO utilization to get a frequency. */
	*freq = (new_bw * 100) / io_percent;
	trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
				new_bw,
				*freq,
				hw->up_wake_mbps,
				hw->down_wake_mbps);
	return req_mbps;
}
480
/*
 * Find the registered hwmon node backing this devfreq device, matched by
 * device pointer, OF node, or (for nodes registered without either) the
 * custom governor. Returns NULL if none matches.
 */
static struct hwmon_node *find_hwmon_node(struct devfreq *df)
{
	struct hwmon_node *node, *found = NULL;

	mutex_lock(&list_lock);
	list_for_each_entry(node, &hwmon_list, list)
		if (node->hw->dev == df->dev.parent ||
		    node->hw->of_node == df->dev.parent->of_node ||
		    (!node->hw->dev && !node->hw->of_node &&
		     node->gov == df->governor)) {
			found = node;
			break;
		}
	mutex_unlock(&list_lock);

	return found;
}
498
/*
 * Called by HW monitor drivers (typically from IRQ thread context) to
 * force an immediate frequency re-evaluation. The periodic devfreq
 * monitor is stopped around update_devfreq() so the polling timer is
 * rearmed relative to this update. Returns 0 or a -errno.
 */
int update_bw_hwmon(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	int ret;

	if (!hwmon)
		return -EINVAL;
	df = hwmon->df;
	if (!df)
		return -ENODEV;
	node = df->data;
	if (!node)
		return -ENODEV;

	/* Ignore requests while the governor is stopped or suspended. */
	if (!node->mon_started)
		return -EBUSY;

	dev_dbg(df->dev.parent, "Got update request\n");
	devfreq_monitor_stop(df);

	mutex_lock(&df->lock);
	ret = update_devfreq(df);
	if (ret)
		dev_err(df->dev.parent,
			"Unable to update freq on request!\n");
	mutex_unlock(&df->lock);

	devfreq_monitor_start(df);

	return 0;
}
531
/*
 * Start (init == true) or resume (init == false) the HW monitor and the
 * devfreq polling. On init, wake thresholds are seeded from the current
 * frequency so the first window behaves sensibly. Returns 0 or -errno.
 */
static int start_monitor(struct devfreq *df, bool init)
{
	struct hwmon_node *node = df->data;
	struct bw_hwmon *hw = node->hw;
	struct device *dev = df->dev.parent;
	unsigned long mbps;
	int ret;

	node->prev_ts = ktime_get();

	if (init) {
		node->prev_ab = 0;
		node->resume_freq = 0;
		node->resume_ab = 0;
		/* Estimate current MBps from frequency and IO utilization. */
		mbps = (df->previous_freq * node->io_percent) / 100;
		hw->up_wake_mbps = mbps;
		hw->down_wake_mbps = MIN_MBPS;
		hw->undo_over_req_mbps = 0;
		ret = hw->start_hwmon(hw, mbps);
	} else {
		ret = hw->resume_hwmon(hw);
	}

	if (ret) {
		dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
		return ret;
	}

	if (init)
		devfreq_monitor_start(df);
	else
		devfreq_monitor_resume(df);

	node->mon_started = true;

	return 0;
}
569
/*
 * Stop (init == true) or suspend (init == false) the devfreq polling and
 * the HW monitor. mon_started is cleared first so concurrent
 * update_bw_hwmon() calls bail out with -EBUSY.
 */
static void stop_monitor(struct devfreq *df, bool init)
{
	struct hwmon_node *node = df->data;
	struct bw_hwmon *hw = node->hw;

	node->mon_started = false;

	if (init) {
		devfreq_monitor_stop(df);
		hw->stop_hwmon(hw);
	} else {
		devfreq_monitor_suspend(df);
		hw->suspend_hwmon(hw);
	}

}
586
/*
 * DEVFREQ_GOV_START handler: bind the matching hwmon node to this devfreq
 * device, grab the device's AB vote slot (if it publishes one through
 * get_dev_status private_data), start monitoring and expose the sysfs
 * tunables. Returns 0 or -errno, fully unwinding on failure.
 */
static int gov_start(struct devfreq *df)
{
	int ret = 0;
	struct device *dev = df->dev.parent;
	struct hwmon_node *node;
	struct bw_hwmon *hw;
	struct devfreq_dev_status stat;

	node = find_hwmon_node(df);
	if (!node) {
		dev_err(dev, "Unable to find HW monitor!\n");
		return -ENODEV;
	}
	hw = node->hw;

	/* Devices that take AB votes pass the slot via private_data. */
	stat.private_data = NULL;
	if (df->profile->get_dev_status)
		ret = df->profile->get_dev_status(df->dev.parent, &stat);
	if (ret || !stat.private_data)
		dev_warn(dev, "Device doesn't take AB votes!\n");
	else
		node->dev_ab = stat.private_data;

	hw->df = df;
	node->orig_data = df->data;
	df->data = node;

	if (start_monitor(df, true))
		goto err_start;

	ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
	if (ret)
		goto err_sysfs;

	return 0;

err_sysfs:
	stop_monitor(df, true);
err_start:
	df->data = node->orig_data;
	node->orig_data = NULL;
	hw->df = NULL;
	node->dev_ab = NULL;
	return ret;
}
632
/*
 * DEVFREQ_GOV_STOP handler: tear down sysfs, stop monitoring and restore
 * the devfreq device to its pre-gov_start() state.
 */
static void gov_stop(struct devfreq *df)
{
	struct hwmon_node *node = df->data;
	struct bw_hwmon *hw = node->hw;

	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
	stop_monitor(df, true);
	df->data = node->orig_data;
	node->orig_data = NULL;
	hw->df = NULL;
	/*
	 * Not all governors know about this additional extended device
	 * configuration. To avoid leaving the extended configuration at a
	 * stale state, set it to 0 and let the next governor take it from
	 * there.
	 */
	if (node->dev_ab)
		*node->dev_ab = 0;
	node->dev_ab = NULL;
}
653
Saravana Kannancddae1b2014-08-07 19:38:02 -0700654static int gov_suspend(struct devfreq *df)
655{
656 struct hwmon_node *node = df->data;
657 unsigned long resume_freq = df->previous_freq;
658 unsigned long resume_ab = *node->dev_ab;
659
660 if (!node->hw->suspend_hwmon)
661 return -ENOSYS;
662
663 if (node->resume_freq) {
664 dev_warn(df->dev.parent, "Governor already suspended!\n");
665 return -EBUSY;
666 }
667
668 stop_monitor(df, false);
669
670 mutex_lock(&df->lock);
671 update_devfreq(df);
672 mutex_unlock(&df->lock);
673
674 node->resume_freq = resume_freq;
675 node->resume_ab = resume_ab;
676
677 return 0;
678}
679
/*
 * DEVFREQ_GOV_RESUME handler: run one devfreq update (get_freq will
 * report the saved resume state while mon_started is false), clear the
 * saved state and restart monitoring. Returns 0 or -errno.
 */
static int gov_resume(struct devfreq *df)
{
	struct hwmon_node *node = df->data;

	if (!node->hw->resume_hwmon)
		return -ENOSYS;

	/* resume_freq != 0 is the "currently suspended" marker. */
	if (!node->resume_freq) {
		dev_warn(df->dev.parent, "Governor already resumed!\n");
		return -EBUSY;
	}

	mutex_lock(&df->lock);
	update_devfreq(df);
	mutex_unlock(&df->lock);

	node->resume_freq = 0;
	node->resume_ab = 0;

	return start_monitor(df, false);
}
701
Saravana Kannanedad3012013-09-23 19:27:57 -0700702static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
703 unsigned long *freq)
704{
Saravana Kannanedad3012013-09-23 19:27:57 -0700705 struct hwmon_node *node = df->data;
706
Saravana Kannancddae1b2014-08-07 19:38:02 -0700707 /* Suspend/resume sequence */
708 if (!node->mon_started) {
709 *freq = node->resume_freq;
710 *node->dev_ab = node->resume_ab;
711 return 0;
712 }
713
Saravana Kannanb93a2752015-06-11 16:04:23 -0700714 get_bw_and_set_irq(node, freq, node->dev_ab);
Saravana Kannanedad3012013-09-23 19:27:57 -0700715
716 return 0;
717}
718
Rohit Gupta4d1f4f42015-05-08 12:04:56 -0700719static ssize_t store_throttle_adj(struct device *dev,
720 struct device_attribute *attr, const char *buf, size_t count)
721{
722 struct devfreq *df = to_devfreq(dev);
723 struct hwmon_node *node = df->data;
724 int ret;
725 unsigned int val;
726
727 if (!node->hw->set_throttle_adj)
728 return -ENOSYS;
729
730 ret = kstrtouint(buf, 10, &val);
731 if (ret)
732 return ret;
733
734 ret = node->hw->set_throttle_adj(node->hw, val);
735
736 if (!ret)
737 return count;
738 else
739 return ret;
740}
741
/*
 * sysfs show for the throttle adjustment knob; reads back via the HW
 * monitor's get_throttle_adj hook, or 0 if unimplemented.
 */
static ssize_t show_throttle_adj(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct devfreq *df = to_devfreq(dev);
	struct hwmon_node *node = df->data;
	unsigned int val;

	if (!node->hw->get_throttle_adj)
		val = 0;
	else
		val = node->hw->get_throttle_adj(node->hw);

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static DEVICE_ATTR(throttle_adj, 0644, show_throttle_adj,
		store_throttle_adj);
759
/* Tunable declarations: gov_attr(name, min, max) clamps stored values. */
gov_attr(guard_band_mbps, 0U, 2000U);
gov_attr(decay_rate, 0U, 100U);
gov_attr(io_percent, 1U, 100U);
gov_attr(bw_step, 50U, 1000U);
gov_attr(sample_ms, 1U, 50U);
gov_attr(up_scale, 0U, 500U);
gov_attr(up_thres, 1U, 100U);
gov_attr(down_thres, 0U, 90U);
gov_attr(down_count, 0U, 90U);
gov_attr(hist_memory, 0U, 90U);
gov_attr(hyst_trigger_count, 0U, 90U);
gov_attr(hyst_length, 0U, 90U);
gov_attr(idle_mbps, 0U, 2000U);
gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);

/* All tunables published under the governor's sysfs group. */
static struct attribute *dev_attr[] = {
	&dev_attr_guard_band_mbps.attr,
	&dev_attr_decay_rate.attr,
	&dev_attr_io_percent.attr,
	&dev_attr_bw_step.attr,
	&dev_attr_sample_ms.attr,
	&dev_attr_up_scale.attr,
	&dev_attr_up_thres.attr,
	&dev_attr_down_thres.attr,
	&dev_attr_down_count.attr,
	&dev_attr_hist_memory.attr,
	&dev_attr_hyst_trigger_count.attr,
	&dev_attr_hyst_length.attr,
	&dev_attr_idle_mbps.attr,
	&dev_attr_mbps_zones.attr,
	&dev_attr_throttle_adj.attr,
	NULL,
};

/* Default group used when no custom governor is supplied. */
static struct attribute_group dev_attr_group = {
	.name = "bw_hwmon",
	.attrs = dev_attr,
};
798
799static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
800 unsigned int event, void *data)
801{
Jonathan Avila52593752017-10-04 17:00:37 -0700802 int ret = 0;
Saravana Kannanedad3012013-09-23 19:27:57 -0700803 unsigned int sample_ms;
Saravana Kannan3536bd32016-02-18 18:28:29 -0800804 struct hwmon_node *node;
805 struct bw_hwmon *hw;
Saravana Kannanedad3012013-09-23 19:27:57 -0700806
Jonathan Avila52593752017-10-04 17:00:37 -0700807 mutex_lock(&state_lock);
808
Saravana Kannanedad3012013-09-23 19:27:57 -0700809 switch (event) {
810 case DEVFREQ_GOV_START:
811 sample_ms = df->profile->polling_ms;
812 sample_ms = max(MIN_MS, sample_ms);
813 sample_ms = min(MAX_MS, sample_ms);
814 df->profile->polling_ms = sample_ms;
815
Saravana Kannancddae1b2014-08-07 19:38:02 -0700816 ret = gov_start(df);
Saravana Kannanedad3012013-09-23 19:27:57 -0700817 if (ret)
Jonathan Avila52593752017-10-04 17:00:37 -0700818 goto out;
Saravana Kannanedad3012013-09-23 19:27:57 -0700819
820 dev_dbg(df->dev.parent,
821 "Enabled dev BW HW monitor governor\n");
822 break;
823
824 case DEVFREQ_GOV_STOP:
Saravana Kannancddae1b2014-08-07 19:38:02 -0700825 gov_stop(df);
Saravana Kannanedad3012013-09-23 19:27:57 -0700826 dev_dbg(df->dev.parent,
827 "Disabled dev BW HW monitor governor\n");
828 break;
829
830 case DEVFREQ_GOV_INTERVAL:
831 sample_ms = *(unsigned int *)data;
832 sample_ms = max(MIN_MS, sample_ms);
833 sample_ms = min(MAX_MS, sample_ms);
Saravana Kannan3536bd32016-02-18 18:28:29 -0800834 /*
835 * Suspend/resume the HW monitor around the interval update
836 * to prevent the HW monitor IRQ from trying to change
837 * stop/start the delayed workqueue while the interval update
838 * is happening.
839 */
840 node = df->data;
841 hw = node->hw;
842 hw->suspend_hwmon(hw);
Saravana Kannanedad3012013-09-23 19:27:57 -0700843 devfreq_interval_update(df, &sample_ms);
Saravana Kannan3536bd32016-02-18 18:28:29 -0800844 ret = hw->resume_hwmon(hw);
845 if (ret) {
846 dev_err(df->dev.parent,
847 "Unable to resume HW monitor (%d)\n", ret);
Jonathan Avila52593752017-10-04 17:00:37 -0700848 goto out;
Saravana Kannan3536bd32016-02-18 18:28:29 -0800849 }
Saravana Kannanedad3012013-09-23 19:27:57 -0700850 break;
Saravana Kannancddae1b2014-08-07 19:38:02 -0700851
852 case DEVFREQ_GOV_SUSPEND:
853 ret = gov_suspend(df);
854 if (ret) {
855 dev_err(df->dev.parent,
856 "Unable to suspend BW HW mon governor (%d)\n",
857 ret);
Jonathan Avila52593752017-10-04 17:00:37 -0700858 goto out;
Saravana Kannancddae1b2014-08-07 19:38:02 -0700859 }
860
861 dev_dbg(df->dev.parent, "Suspended BW HW mon governor\n");
862 break;
863
864 case DEVFREQ_GOV_RESUME:
865 ret = gov_resume(df);
866 if (ret) {
867 dev_err(df->dev.parent,
868 "Unable to resume BW HW mon governor (%d)\n",
869 ret);
Jonathan Avila52593752017-10-04 17:00:37 -0700870 goto out;
Saravana Kannancddae1b2014-08-07 19:38:02 -0700871 }
872
873 dev_dbg(df->dev.parent, "Resumed BW HW mon governor\n");
874 break;
Saravana Kannanedad3012013-09-23 19:27:57 -0700875 }
876
Jonathan Avila52593752017-10-04 17:00:37 -0700877out:
878 mutex_unlock(&state_lock);
879
880 return ret;
Saravana Kannanedad3012013-09-23 19:27:57 -0700881}
882
/* Default "bw_hwmon" governor used when no custom governor is supplied. */
static struct devfreq_governor devfreq_gov_bw_hwmon = {
	.name = "bw_hwmon",
	.get_target_freq = devfreq_bw_hwmon_get_freq,
	.event_handler = devfreq_bw_hwmon_ev_handler,
};
888
/*
 * Register a HW bandwidth monitor with this governor framework. The
 * monitor must be identifiable by a custom governor, a device pointer,
 * or an OF node. Allocates a hwmon_node (devm-managed), seeds default
 * tunables, adds it to the global list and registers the governor
 * (refcounted for the shared default governor). Returns 0 or -errno.
 */
int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
{
	int ret = 0;
	struct hwmon_node *node;
	struct attribute_group *attr_grp;

	if (!hwmon->gov && !hwmon->dev && !hwmon->of_node)
		return -EINVAL;

	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	if (hwmon->gov) {
		/* Custom governor: publish the same attrs under its name. */
		attr_grp = devm_kzalloc(dev, sizeof(*attr_grp), GFP_KERNEL);
		if (!attr_grp)
			return -ENOMEM;

		hwmon->gov->get_target_freq = devfreq_bw_hwmon_get_freq;
		hwmon->gov->event_handler = devfreq_bw_hwmon_ev_handler;
		attr_grp->name = hwmon->gov->name;
		attr_grp->attrs = dev_attr;

		node->gov = hwmon->gov;
		node->attr_grp = attr_grp;
	} else {
		node->gov = &devfreq_gov_bw_hwmon;
		node->attr_grp = &dev_attr_group;
	}

	/* Default tunable values; adjustable later via sysfs. */
	node->guard_band_mbps = 100;
	node->decay_rate = 90;
	node->io_percent = 16;
	node->bw_step = 190;
	node->sample_ms = 50;
	node->up_scale = 0;
	node->up_thres = 10;
	node->down_thres = 0;
	node->down_count = 3;
	node->hist_memory = 0;
	node->hyst_trigger_count = 3;
	node->hyst_length = 0;
	node->idle_mbps = 400;
	node->mbps_zones[0] = 0;
	node->hw = hwmon;

	mutex_lock(&list_lock);
	list_add_tail(&node->list, &hwmon_list);
	mutex_unlock(&list_lock);

	if (hwmon->gov) {
		ret = devfreq_add_governor(hwmon->gov);
	} else {
		/* The shared default governor is registered only once. */
		mutex_lock(&state_lock);
		if (!use_cnt)
			ret = devfreq_add_governor(&devfreq_gov_bw_hwmon);
		if (!ret)
			use_cnt++;
		mutex_unlock(&state_lock);
	}

	if (!ret)
		dev_info(dev, "BW HWmon governor registered.\n");
	else
		dev_err(dev, "BW HWmon governor registration failed!\n");

	return ret;
}
957
/* Module metadata. */
MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
MODULE_LICENSE("GPL v2");