/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "cache-hwmon: " fmt

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/devfreq.h>
#include "governor.h"
#include "governor_cache_hwmon.h"

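/*
 * Per-device governor state. One node is created for each cache_hwmon
 * instance passed to register_cache_hwmon() and is matched back to its
 * devfreq device in find_hwmon_node().
 */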
struct cache_hwmon_node {
	unsigned int cycles_per_low_req;
	unsigned int cycles_per_med_req;
	unsigned int cycles_per_high_req;
	unsigned int min_busy;
	unsigned int max_busy;
	unsigned int tolerance_mrps;
	unsigned int guard_band_mhz;
	unsigned int decay_rate;
	unsigned long prev_mhz;
	ktime_t prev_ts;
	struct list_head list;
	void *orig_data;
	struct cache_hwmon *hw;
	struct attribute_group *attr_grp;
};

static LIST_HEAD(cache_hwmon_list);
static DEFINE_MUTEX(list_lock);

static int use_cnt;
static DEFINE_MUTEX(state_lock);

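/*
 * Boilerplate generators for the governor's sysfs tunables: show_attr()
 * emits a read handler, store_attr() emits a write handler that clamps
 * the written value to [_min, _max], and gov_attr() ties both to a 0644
 * device attribute.
 */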
#define show_attr(name) \
static ssize_t show_##name(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct cache_hwmon_node *hw = df->data; \
	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name); \
}

#define store_attr(name, _min, _max) \
static ssize_t store_##name(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	int ret; \
	unsigned int val; \
	struct devfreq *df = to_devfreq(dev); \
	struct cache_hwmon_node *hw = df->data; \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) \
		return ret; \
	val = max(val, _min); \
	val = min(val, _max); \
	hw->name = val; \
	return count; \
}

#define gov_attr(__attr, min, max) \
show_attr(__attr) \
store_attr(__attr, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)

#define MIN_MS 10U
#define MAX_MS 500U

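/*
 * Look up the registered hardware monitor that belongs to this devfreq
 * device, matching on either the monitor's device pointer or its OF node.
 */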
static struct cache_hwmon_node *find_hwmon_node(struct devfreq *df)
{
	struct cache_hwmon_node *node, *found = NULL;

	mutex_lock(&list_lock);
	list_for_each_entry(node, &cache_hwmon_list, list)
		if (node->hw->dev == df->dev.parent ||
		    node->hw->of_node == df->dev.parent->of_node) {
			found = node;
			break;
		}
	mutex_unlock(&list_lock);

	return found;
}

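/*
 * Take one measurement: compute the elapsed time since the previous
 * sample, then have the hardware driver fill in the request-rate stats
 * (in millions of requests per second) and re-arm its interrupt based
 * on tolerance_mrps.
 */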
static unsigned long measure_mrps_and_set_irq(struct cache_hwmon_node *node,
					struct mrps_stats *stat)
{
	ktime_t ts;
	unsigned int us;
	struct cache_hwmon *hw = node->hw;

	/*
	 * Since we are stopping the counters, we don't want this short work
	 * to be interrupted by other tasks and cause the measurements to be
	 * wrong. We don't block interrupts, both to avoid affecting
	 * interrupt latency and because interrupt handlers run in atomic
	 * context and should be short anyway.
	 */
	preempt_disable();

	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
	if (!us)
		us = 1;

	hw->meas_mrps_and_set_irq(hw, node->tolerance_mrps, us, stat);
	node->prev_ts = ts;

	preempt_enable();

	dev_dbg(hw->df->dev.parent,
		"stat H=%3lu, M=%3lu, T=%3lu, b=%3u, f=%4lu, us=%u\n",
		stat->high, stat->med, stat->high + stat->med,
		stat->busy_percent, hw->df->previous_freq / 1000, us);

	return 0;
}

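/*
 * Translate measured request rates into a target frequency: weight each
 * rate by its cycles-per-request cost, scale by the busy percentage
 * (clamped to [min_busy, max_busy]), blend with the previous result
 * according to decay_rate when the value is falling, and add
 * guard_band_mhz of headroom.
 */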
static void compute_cache_freq(struct cache_hwmon_node *node,
			struct mrps_stats *mrps, unsigned long *freq)
{
	unsigned long new_mhz;
	unsigned int busy;

	new_mhz = mrps->high * node->cycles_per_high_req
		+ mrps->med * node->cycles_per_med_req
		+ mrps->low * node->cycles_per_low_req;

	busy = max(node->min_busy, mrps->busy_percent);
	busy = min(node->max_busy, busy);

	new_mhz *= 100;
	new_mhz /= busy;

	if (new_mhz < node->prev_mhz) {
		new_mhz = new_mhz * node->decay_rate + node->prev_mhz
				* (100 - node->decay_rate);
		new_mhz /= 100;
	}
	node->prev_mhz = new_mhz;

	new_mhz += node->guard_band_mhz;
	*freq = new_mhz * 1000;
}

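/*
 * Threaded IRQ handler invoked when the hardware monitor fires. Polling
 * is paused while the frequency is re-evaluated, and interrupts arriving
 * within TOO_SOON_US of the previous sample are ignored (see the comment
 * in the body below).
 */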
#define TOO_SOON_US (1 * USEC_PER_MSEC)
static irqreturn_t mon_intr_handler(int irq, void *dev)
{
	struct cache_hwmon_node *node = dev;
	struct devfreq *df = node->hw->df;
	ktime_t ts;
	unsigned int us;
	int ret;

	if (!node->hw->is_valid_irq(node->hw))
		return IRQ_NONE;

	dev_dbg(df->dev.parent, "Got interrupt\n");
	devfreq_monitor_stop(df);

	/*
	 * Don't recalculate the cache frequency if the interrupt comes
	 * right after a previous cache frequency calculation. This is done
	 * for two reasons:
	 *
	 * 1. Sampling the cache requests over a very short duration can
	 *    result in a very inaccurate measurement due to short bursts.
	 * 2. This can only happen if the limit was hit very close to the
	 *    end of the previous sample period, which means the current
	 *    cache request estimate is not far off and doesn't need to be
	 *    readjusted.
	 */
	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
	if (us > TOO_SOON_US) {
		mutex_lock(&df->lock);
		ret = update_devfreq(df);
		if (ret)
			dev_err(df->dev.parent,
				"Unable to update freq on IRQ!\n");
		mutex_unlock(&df->lock);
	}

	devfreq_monitor_start(df);

	return IRQ_HANDLED;
}

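/*
 * devfreq get_target_freq() hook: take a fresh measurement and convert
 * it into the next target frequency (in kHz, matching previous_freq).
 */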
static int devfreq_cache_hwmon_get_freq(struct devfreq *df,
					unsigned long *freq)
{
	struct mrps_stats stat;
	struct cache_hwmon_node *node = df->data;

	memset(&stat, 0, sizeof(stat));
	measure_mrps_and_set_irq(node, &stat);
	compute_cache_freq(node, &stat, freq);

	return 0;
}

gov_attr(cycles_per_low_req, 1U, 100U);
gov_attr(cycles_per_med_req, 1U, 100U);
gov_attr(cycles_per_high_req, 1U, 100U);
gov_attr(min_busy, 1U, 100U);
gov_attr(max_busy, 1U, 100U);
gov_attr(tolerance_mrps, 0U, 100U);
gov_attr(guard_band_mhz, 0U, 500U);
gov_attr(decay_rate, 0U, 100U);

static struct attribute *dev_attr[] = {
	&dev_attr_cycles_per_low_req.attr,
	&dev_attr_cycles_per_med_req.attr,
	&dev_attr_cycles_per_high_req.attr,
	&dev_attr_min_busy.attr,
	&dev_attr_max_busy.attr,
	&dev_attr_tolerance_mrps.attr,
	&dev_attr_guard_band_mhz.attr,
	&dev_attr_decay_rate.attr,
	NULL,
};

static struct attribute_group dev_attr_group = {
	.name = "cache_hwmon",
	.attrs = dev_attr,
};

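/*
 * DEVFREQ_GOV_START path: bind the hwmon node to the devfreq device,
 * seed the monitor with the request rate implied by the current
 * frequency, then start the hardware counters and polling, and hook up
 * the optional interrupt and the sysfs tunables.
 */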
static int start_monitoring(struct devfreq *df)
{
	int ret;
	struct mrps_stats mrps;
	struct device *dev = df->dev.parent;
	struct cache_hwmon_node *node;
	struct cache_hwmon *hw;

	node = find_hwmon_node(df);
	if (!node) {
		dev_err(dev, "Unable to find HW monitor!\n");
		return -ENODEV;
	}
	hw = node->hw;
	hw->df = df;
	node->orig_data = df->data;
	df->data = node;

	node->prev_ts = ktime_get();
	node->prev_mhz = 0;
	mrps.high = (df->previous_freq / 1000) - node->guard_band_mhz;
	mrps.high /= node->cycles_per_high_req;
	mrps.med = mrps.low = 0;

	ret = hw->start_hwmon(hw, &mrps);
	if (ret) {
		dev_err(dev, "Unable to start HW monitor!\n");
		goto err_start;
	}

	devfreq_monitor_start(df);

	if (hw->irq)
		ret = request_threaded_irq(hw->irq, NULL, mon_intr_handler,
					   IRQF_ONESHOT | IRQF_SHARED,
					   "cache_hwmon", node);
	if (ret) {
		dev_err(dev, "Unable to register interrupt handler!\n");
		goto req_irq_fail;
	}

	ret = sysfs_create_group(&df->dev.kobj, &dev_attr_group);
	if (ret) {
		dev_err(dev, "Error creating sys entries!\n");
		goto sysfs_fail;
	}

	return 0;

sysfs_fail:
	if (hw->irq) {
		disable_irq(hw->irq);
		free_irq(hw->irq, node);
	}
req_irq_fail:
	devfreq_monitor_stop(df);
	hw->stop_hwmon(hw);
err_start:
	df->data = node->orig_data;
	node->orig_data = NULL;
	hw->df = NULL;
	return ret;
}

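/* DEVFREQ_GOV_STOP path: tear down everything start_monitoring() set up. */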
static void stop_monitoring(struct devfreq *df)
{
	struct cache_hwmon_node *node = df->data;
	struct cache_hwmon *hw = node->hw;

	sysfs_remove_group(&df->dev.kobj, &dev_attr_group);
	if (hw->irq) {
		disable_irq(hw->irq);
		free_irq(hw->irq, node);
	}
	devfreq_monitor_stop(df);
	hw->stop_hwmon(hw);
	df->data = node->orig_data;
	node->orig_data = NULL;
	hw->df = NULL;
}

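/*
 * Governor event handler. The polling interval is clamped to
 * [MIN_MS, MAX_MS] both at governor start and on interval updates.
 */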
static int devfreq_cache_hwmon_ev_handler(struct devfreq *df,
					unsigned int event, void *data)
{
	int ret;
	unsigned int sample_ms;

	switch (event) {
	case DEVFREQ_GOV_START:
		sample_ms = df->profile->polling_ms;
		sample_ms = max(MIN_MS, sample_ms);
		sample_ms = min(MAX_MS, sample_ms);
		df->profile->polling_ms = sample_ms;

		ret = start_monitoring(df);
		if (ret)
			return ret;

		dev_dbg(df->dev.parent, "Enabled Cache HW monitor governor\n");
		break;

	case DEVFREQ_GOV_STOP:
		stop_monitoring(df);
		dev_dbg(df->dev.parent, "Disabled Cache HW monitor governor\n");
		break;

	case DEVFREQ_GOV_INTERVAL:
		sample_ms = *(unsigned int *)data;
		sample_ms = max(MIN_MS, sample_ms);
		sample_ms = min(MAX_MS, sample_ms);
		devfreq_interval_update(df, &sample_ms);
		break;
	}

	return 0;
}

static struct devfreq_governor devfreq_cache_hwmon = {
	.name = "cache_hwmon",
	.get_target_freq = devfreq_cache_hwmon_get_freq,
	.event_handler = devfreq_cache_hwmon_ev_handler,
};

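/*
 * Entry point for hardware drivers. Registers a monitor (identified by
 * its device or OF node), installs the governor on first use, and seeds
 * the node with the default tunables below.
 *
 * A minimal sketch of a caller, assuming a platform driver probe and
 * purely illustrative callback names (the my_* identifiers are
 * hypothetical, not part of this file):
 *
 *	static struct cache_hwmon my_hwmon = {
 *		.start_hwmon = my_start_hwmon,
 *		.stop_hwmon = my_stop_hwmon,
 *		.meas_mrps_and_set_irq = my_meas_mrps,
 *		.is_valid_irq = my_is_valid_irq,
 *	};
 *
 *	my_hwmon.dev = &pdev->dev;
 *	ret = register_cache_hwmon(&pdev->dev, &my_hwmon);
 *
 * See governor_cache_hwmon.h for the full cache_hwmon definition.
 */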
int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon)
{
	int ret = 0;
	struct cache_hwmon_node *node;

	if (!hwmon->dev && !hwmon->of_node)
		return -EINVAL;

	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->cycles_per_med_req = 20;
	node->cycles_per_high_req = 35;
	node->min_busy = 100;
	node->max_busy = 100;
	node->tolerance_mrps = 5;
	node->guard_band_mhz = 100;
	node->decay_rate = 90;
	node->hw = hwmon;
	node->attr_grp = &dev_attr_group;

	mutex_lock(&state_lock);
	if (!use_cnt) {
		ret = devfreq_add_governor(&devfreq_cache_hwmon);
		if (!ret)
			use_cnt++;
	}
	mutex_unlock(&state_lock);

	if (!ret) {
		dev_info(dev, "Cache HWmon governor registered.\n");
	} else {
		dev_err(dev, "Failed to add Cache HWmon governor\n");
		return ret;
	}

	mutex_lock(&list_lock);
	list_add_tail(&node->list, &cache_hwmon_list);
	mutex_unlock(&list_lock);

	return ret;
}

MODULE_DESCRIPTION("HW monitor based cache frequency governor");
MODULE_LICENSE("GPL v2");