/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "cache-hwmon: " fmt

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/devfreq.h>
#include "governor.h"
#include "governor_cache_hwmon.h"

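/*
 * Per-device governor state. One node is allocated for each cache HW
 * monitor registered via register_cache_hwmon() and looked up from the
 * devfreq device in find_hwmon_node(). The tunable fields are exposed
 * through the "cache_hwmon" sysfs attribute group defined below.
 */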
struct cache_hwmon_node {
	unsigned int cycles_per_low_req;
	unsigned int cycles_per_med_req;
	unsigned int cycles_per_high_req;
	unsigned int min_busy;
	unsigned int max_busy;
	unsigned int tolerance_mrps;
	unsigned int guard_band_mhz;
	unsigned int decay_rate;
	unsigned long prev_mhz;
	ktime_t prev_ts;
	bool mon_started;
	struct list_head list;
	void *orig_data;
	struct cache_hwmon *hw;
	struct attribute_group *attr_grp;
};

static LIST_HEAD(cache_hwmon_list);
static DEFINE_MUTEX(list_lock);

static int use_cnt;
static DEFINE_MUTEX(state_lock);

#define show_attr(name) \
static ssize_t show_##name(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct cache_hwmon_node *hw = df->data; \
	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name); \
}

#define store_attr(name, _min, _max) \
static ssize_t store_##name(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	int ret; \
	unsigned int val; \
	struct devfreq *df = to_devfreq(dev); \
	struct cache_hwmon_node *hw = df->data; \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) \
		return ret; \
	val = max(val, _min); \
	val = min(val, _max); \
	hw->name = val; \
	return count; \
}

#define gov_attr(__attr, min, max) \
show_attr(__attr) \
store_attr(__attr, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)

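/*
 * Each gov_attr() instantiation below expands to a show_<attr>/store_<attr>
 * pair and a DEVICE_ATTR() definition, giving a read-write sysfs file whose
 * writes are clamped to the [min, max] range passed to gov_attr(). Since
 * the attribute group is attached to the devfreq device, a tunable would
 * typically show up as (path illustrative, depends on the device name):
 *
 *	/sys/class/devfreq/<devname>/cache_hwmon/guard_band_mhz
 */
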
#define MIN_MS	10U
#define MAX_MS	500U

static struct cache_hwmon_node *find_hwmon_node(struct devfreq *df)
{
	struct cache_hwmon_node *node, *found = NULL;

	mutex_lock(&list_lock);
	list_for_each_entry(node, &cache_hwmon_list, list)
		if (node->hw->dev == df->dev.parent ||
		    node->hw->of_node == df->dev.parent->of_node) {
			found = node;
			break;
		}
	mutex_unlock(&list_lock);

	return found;
}

static unsigned long measure_mrps_and_set_irq(struct cache_hwmon_node *node,
					struct mrps_stats *stat)
{
	ktime_t ts;
	unsigned int us;
	struct cache_hwmon *hw = node->hw;

	/*
	 * Since we are stopping the counters, we don't want this short
	 * stretch of work to be interrupted by other tasks and skew the
	 * measurements. Interrupts are not blocked, both to avoid hurting
	 * interrupt latency and because interrupt handlers run in atomic
	 * context and should be short anyway.
	 */
	preempt_disable();

	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
	if (!us)
		us = 1;

	hw->meas_mrps_and_set_irq(hw, node->tolerance_mrps, us, stat);
	node->prev_ts = ts;

	preempt_enable();

	dev_dbg(hw->df->dev.parent,
		"stat H=%3lu, M=%3lu, T=%3lu, b=%3u, f=%4lu, us=%u\n",
		stat->high, stat->med, stat->high + stat->med,
		stat->busy_percent, hw->df->previous_freq / 1000, us);

	return 0;
}

static void compute_cache_freq(struct cache_hwmon_node *node,
				struct mrps_stats *mrps, unsigned long *freq)
{
	unsigned long new_mhz;
	unsigned int busy;

	new_mhz = mrps->high * node->cycles_per_high_req
		+ mrps->med * node->cycles_per_med_req
		+ mrps->low * node->cycles_per_low_req;

	busy = max(node->min_busy, mrps->busy_percent);
	busy = min(node->max_busy, busy);

	new_mhz *= 100;
	new_mhz /= busy;

	if (new_mhz < node->prev_mhz) {
		new_mhz = new_mhz * node->decay_rate + node->prev_mhz
			* (100 - node->decay_rate);
		new_mhz /= 100;
	}
	node->prev_mhz = new_mhz;

	new_mhz += node->guard_band_mhz;
	*freq = new_mhz * 1000;
}

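/*
 * Worked example for compute_cache_freq(), with illustrative numbers
 * rather than values from any particular target: 5 high MRPS at 35
 * cycles/req plus 10 med MRPS at 20 cycles/req and no low requests gives
 * 5 * 35 + 10 * 20 = 375 MHz at 100% busy. If the previous estimate was
 * 500 MHz and decay_rate is 90, the ramp-down is smoothed to
 * (375 * 90 + 500 * 10) / 100 = 387 MHz, and the default 100 MHz guard
 * band brings the final request to 487 MHz (487000 kHz).
 */
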
#define TOO_SOON_US	(1 * USEC_PER_MSEC)
int update_cache_hwmon(struct cache_hwmon *hwmon)
{
	struct cache_hwmon_node *node;
	struct devfreq *df;
	ktime_t ts;
	unsigned int us;
	int ret;

	if (!hwmon)
		return -EINVAL;
	df = hwmon->df;
	if (!df)
		return -ENODEV;
	node = df->data;
	if (!node)
		return -ENODEV;
	if (!node->mon_started)
		return -EBUSY;

	dev_dbg(df->dev.parent, "Got update request\n");
	devfreq_monitor_stop(df);

	/*
	 * Don't recalculate the cache frequency if the interrupt comes right
	 * after a previous calculation. This is done for two reasons:
	 *
	 * 1. Sampling the cache requests over a very short duration can
	 *    result in a very inaccurate measurement due to short bursts.
	 * 2. This can only happen if the limit was hit very close to the end
	 *    of the previous sample period, which means the current cache
	 *    request estimate is not far off and doesn't need to be
	 *    readjusted.
	 */
	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
	if (us > TOO_SOON_US) {
		mutex_lock(&df->lock);
		ret = update_devfreq(df);
		if (ret)
			dev_err(df->dev.parent,
				"Unable to update freq on request!\n");
		mutex_unlock(&df->lock);
	}

	devfreq_monitor_start(df);

	return 0;
}

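/*
 * A minimal sketch of how a backing HW monitor driver might call
 * update_cache_hwmon() from its IRQ handler when a counter limit set by
 * meas_mrps_and_set_irq() is hit. The handler name and passing the
 * cache_hwmon pointer as dev_id are illustrative assumptions, not part
 * of this file's API:
 *
 *	static irqreturn_t cache_counter_irq(int irq, void *dev_id)
 *	{
 *		struct cache_hwmon *hw = dev_id;
 *
 *		if (update_cache_hwmon(hw))
 *			pr_warn("Couldn't update cache freq\n");
 *
 *		return IRQ_HANDLED;
 *	}
 */
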
static int devfreq_cache_hwmon_get_freq(struct devfreq *df,
					unsigned long *freq)
{
	struct mrps_stats stat;
	struct cache_hwmon_node *node = df->data;

	memset(&stat, 0, sizeof(stat));
	measure_mrps_and_set_irq(node, &stat);
	compute_cache_freq(node, &stat, freq);

	return 0;
}

gov_attr(cycles_per_low_req, 1U, 100U);
gov_attr(cycles_per_med_req, 1U, 100U);
gov_attr(cycles_per_high_req, 1U, 100U);
gov_attr(min_busy, 1U, 100U);
gov_attr(max_busy, 1U, 100U);
gov_attr(tolerance_mrps, 0U, 100U);
gov_attr(guard_band_mhz, 0U, 500U);
gov_attr(decay_rate, 0U, 100U);

static struct attribute *dev_attr[] = {
	&dev_attr_cycles_per_low_req.attr,
	&dev_attr_cycles_per_med_req.attr,
	&dev_attr_cycles_per_high_req.attr,
	&dev_attr_min_busy.attr,
	&dev_attr_max_busy.attr,
	&dev_attr_tolerance_mrps.attr,
	&dev_attr_guard_band_mhz.attr,
	&dev_attr_decay_rate.attr,
	NULL,
};

static struct attribute_group dev_attr_group = {
	.name = "cache_hwmon",
	.attrs = dev_attr,
};

static int start_monitoring(struct devfreq *df)
{
	int ret;
	struct mrps_stats mrps;
	struct device *dev = df->dev.parent;
	struct cache_hwmon_node *node;
	struct cache_hwmon *hw;

	node = find_hwmon_node(df);
	if (!node) {
		dev_err(dev, "Unable to find HW monitor!\n");
		return -ENODEV;
	}
	hw = node->hw;
	hw->df = df;
	node->orig_data = df->data;
	df->data = node;

	node->prev_ts = ktime_get();
	node->prev_mhz = 0;
	mrps.high = (df->previous_freq / 1000) - node->guard_band_mhz;
	mrps.high /= node->cycles_per_high_req;
	mrps.med = mrps.low = 0;

	ret = hw->start_hwmon(hw, &mrps);
	if (ret) {
		dev_err(dev, "Unable to start HW monitor!\n");
		goto err_start;
	}

	devfreq_monitor_start(df);
	node->mon_started = true;

	ret = sysfs_create_group(&df->dev.kobj, &dev_attr_group);
	if (ret) {
		dev_err(dev, "Error creating sysfs entries!\n");
		goto sysfs_fail;
	}

	return 0;

sysfs_fail:
	node->mon_started = false;
	devfreq_monitor_stop(df);
	hw->stop_hwmon(hw);
err_start:
	df->data = node->orig_data;
	node->orig_data = NULL;
	hw->df = NULL;
	return ret;
}

static void stop_monitoring(struct devfreq *df)
{
	struct cache_hwmon_node *node = df->data;
	struct cache_hwmon *hw = node->hw;

	sysfs_remove_group(&df->dev.kobj, &dev_attr_group);
	node->mon_started = false;
	devfreq_monitor_stop(df);
	hw->stop_hwmon(hw);
	df->data = node->orig_data;
	node->orig_data = NULL;
	hw->df = NULL;
}

static int devfreq_cache_hwmon_ev_handler(struct devfreq *df,
					unsigned int event, void *data)
{
	int ret;
	unsigned int sample_ms;

	switch (event) {
	case DEVFREQ_GOV_START:
		sample_ms = df->profile->polling_ms;
		sample_ms = max(MIN_MS, sample_ms);
		sample_ms = min(MAX_MS, sample_ms);
		df->profile->polling_ms = sample_ms;

		ret = start_monitoring(df);
		if (ret)
			return ret;

		dev_dbg(df->dev.parent, "Enabled Cache HW monitor governor\n");
		break;

	case DEVFREQ_GOV_STOP:
		stop_monitoring(df);
		dev_dbg(df->dev.parent, "Disabled Cache HW monitor governor\n");
		break;

	case DEVFREQ_GOV_INTERVAL:
		sample_ms = *(unsigned int *)data;
		sample_ms = max(MIN_MS, sample_ms);
		sample_ms = min(MAX_MS, sample_ms);
		devfreq_interval_update(df, &sample_ms);
		break;
	}

	return 0;
}

static struct devfreq_governor devfreq_cache_hwmon = {
	.name = "cache_hwmon",
	.get_target_freq = devfreq_cache_hwmon_get_freq,
	.event_handler = devfreq_cache_hwmon_ev_handler,
};

int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon)
{
	int ret = 0;
	struct cache_hwmon_node *node;

	if (!hwmon->dev && !hwmon->of_node)
		return -EINVAL;

	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->cycles_per_med_req = 20;
	node->cycles_per_high_req = 35;
	node->min_busy = 100;
	node->max_busy = 100;
	node->tolerance_mrps = 5;
	node->guard_band_mhz = 100;
	node->decay_rate = 90;
	node->hw = hwmon;
	node->attr_grp = &dev_attr_group;

	mutex_lock(&state_lock);
	if (!use_cnt) {
		ret = devfreq_add_governor(&devfreq_cache_hwmon);
		if (!ret)
			use_cnt++;
	}
	mutex_unlock(&state_lock);

	if (!ret) {
		dev_info(dev, "Cache HWmon governor registered.\n");
	} else {
		dev_err(dev, "Failed to add Cache HWmon governor\n");
		return ret;
	}

	mutex_lock(&list_lock);
	list_add_tail(&node->list, &cache_hwmon_list);
	mutex_unlock(&list_lock);

	return ret;
}

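/*
 * A minimal sketch of how a platform driver might register with this
 * governor. The probe function, the my_* callbacks and the device tree
 * property name are hypothetical; only struct cache_hwmon, its fields
 * and register_cache_hwmon() come from this governor:
 *
 *	static int my_cache_hwmon_probe(struct platform_device *pdev)
 *	{
 *		struct cache_hwmon *hw;
 *
 *		hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
 *		if (!hw)
 *			return -ENOMEM;
 *
 *		hw->of_node = of_parse_phandle(pdev->dev.of_node,
 *					       "target-dev", 0);
 *		if (!hw->of_node)
 *			hw->dev = &pdev->dev;
 *		hw->start_hwmon = my_start_hwmon;
 *		hw->stop_hwmon = my_stop_hwmon;
 *		hw->meas_mrps_and_set_irq = my_meas_mrps;
 *
 *		return register_cache_hwmon(&pdev->dev, hw);
 *	}
 */
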
MODULE_DESCRIPTION("HW monitor based cache frequency governor");
MODULE_LICENSE("GPL v2");