blob: 12a90d41bc37d0a01f3c9eb2799bb26fce15611a [file] [log] [blame]
/*
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
13
14#define pr_fmt(fmt) "mem_lat: " fmt
15
16#include <linux/kernel.h>
17#include <linux/sizes.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/delay.h>
22#include <linux/ktime.h>
23#include <linux/time.h>
24#include <linux/err.h>
25#include <linux/errno.h>
26#include <linux/mutex.h>
27#include <linux/interrupt.h>
28#include <linux/platform_device.h>
29#include <linux/of.h>
30#include <linux/devfreq.h>
31#include "governor.h"
32#include "governor_memlat.h"
33
34#include <trace/events/power.h>
35
/*
 * Per-device instance state shared by the "mem_latency" and "compute"
 * devfreq governors. One node is created per registered HW monitor and
 * kept on the global memlat_list.
 */
struct memlat_node {
	/* Max instructions-per-memory-access ratio for a core to be
	 * considered latency-bound (sysfs tunable, clamped 1..10000). */
	unsigned int ratio_ceil;
	/* Min stall percentage for a core to be considered
	 * latency-bound (sysfs tunable, clamped 0..100). */
	unsigned int stall_floor;
	bool mon_started;
	/* True if the previous vote was already 0 — used to suppress
	 * repeated "0" trace events in devfreq_memlat_get_freq(). */
	bool already_zero;
	struct list_head list;
	/* devfreq's original ->data, restored when the governor stops. */
	void *orig_data;
	struct memlat_hwmon *hw;
	struct devfreq_governor *gov;
	/* Sysfs group installed on the devfreq device while active. */
	struct attribute_group *attr_grp;
};
47
/* All registered governor instances; protected by list_lock. */
static LIST_HEAD(memlat_list);
static DEFINE_MUTEX(list_lock);

/* Governor registration refcounts; protected by state_lock. */
static int memlat_use_cnt;
static int compute_use_cnt;
static DEFINE_MUTEX(state_lock);
54
/*
 * show_attr()/store_attr() generate sysfs accessors for an unsigned int
 * member of struct memlat_node. store_attr() clamps the written value to
 * [_min, _max] instead of rejecting out-of-range input. gov_attr() emits
 * both plus the 0644 DEVICE_ATTR definition.
 */
#define show_attr(name) \
static ssize_t show_##name(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct memlat_node *hw = df->data; \
	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name); \
}

#define store_attr(name, _min, _max) \
static ssize_t store_##name(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct memlat_node *hw = df->data; \
	int ret; \
	unsigned int val; \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) \
		return ret; \
	val = max(val, _min); \
	val = min(val, _max); \
	hw->name = val; \
	return count; \
}

#define gov_attr(__attr, min, max) \
show_attr(__attr) \
store_attr(__attr, min, max) \
static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
86
David Keitel724116e2016-09-13 15:41:52 -070087static ssize_t show_map(struct device *dev, struct device_attribute *attr,
88 char *buf)
89{
90 struct devfreq *df = to_devfreq(dev);
91 struct memlat_node *n = df->data;
92 struct core_dev_map *map = n->hw->freq_map;
93 unsigned int cnt = 0;
94
95 cnt += snprintf(buf, PAGE_SIZE, "Core freq (MHz)\tDevice BW\n");
96
97 while (map->core_mhz && cnt < PAGE_SIZE) {
98 cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%15u\t%9u\n",
99 map->core_mhz, map->target_freq);
100 map++;
101 }
102 if (cnt < PAGE_SIZE)
103 cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
104
105 return cnt;
106}
107
108static DEVICE_ATTR(freq_map, 0444, show_map, NULL);
109
Rohit Gupta870b1802016-04-13 16:55:04 -0700110static unsigned long core_to_dev_freq(struct memlat_node *node,
111 unsigned long coref)
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700112{
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700113 struct memlat_hwmon *hw = node->hw;
Rohit Gupta870b1802016-04-13 16:55:04 -0700114 struct core_dev_map *map = hw->freq_map;
115 unsigned long freq = 0;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700116
Rohit Gupta870b1802016-04-13 16:55:04 -0700117 if (!map)
118 goto out;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700119
Rohit Gupta870b1802016-04-13 16:55:04 -0700120 while (map->core_mhz && map->core_mhz < coref)
121 map++;
122 if (!map->core_mhz)
123 map--;
124 freq = map->target_freq;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700125
Rohit Gupta870b1802016-04-13 16:55:04 -0700126out:
127 pr_debug("freq: %lu -> dev: %lu\n", coref, freq);
128 return freq;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700129}
130
131static struct memlat_node *find_memlat_node(struct devfreq *df)
132{
133 struct memlat_node *node, *found = NULL;
134
135 mutex_lock(&list_lock);
136 list_for_each_entry(node, &memlat_list, list)
137 if (node->hw->dev == df->dev.parent ||
138 node->hw->of_node == df->dev.parent->of_node) {
139 found = node;
140 break;
141 }
142 mutex_unlock(&list_lock);
143
144 return found;
145}
146
147static int start_monitor(struct devfreq *df)
148{
149 struct memlat_node *node = df->data;
150 struct memlat_hwmon *hw = node->hw;
151 struct device *dev = df->dev.parent;
152 int ret;
153
154 ret = hw->start_hwmon(hw);
155
156 if (ret) {
157 dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
158 return ret;
159 }
160
161 devfreq_monitor_start(df);
162
163 node->mon_started = true;
164
165 return 0;
166}
167
168static void stop_monitor(struct devfreq *df)
169{
170 struct memlat_node *node = df->data;
171 struct memlat_hwmon *hw = node->hw;
172
173 node->mon_started = false;
174
175 devfreq_monitor_stop(df);
176 hw->stop_hwmon(hw);
177}
178
179static int gov_start(struct devfreq *df)
180{
181 int ret = 0;
182 struct device *dev = df->dev.parent;
183 struct memlat_node *node;
184 struct memlat_hwmon *hw;
185
186 node = find_memlat_node(df);
187 if (!node) {
188 dev_err(dev, "Unable to find HW monitor!\n");
189 return -ENODEV;
190 }
191 hw = node->hw;
192
193 hw->df = df;
194 node->orig_data = df->data;
195 df->data = node;
196
197 if (start_monitor(df))
198 goto err_start;
199
200 ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
201 if (ret)
202 goto err_sysfs;
203
204 return 0;
205
206err_sysfs:
207 stop_monitor(df);
208err_start:
209 df->data = node->orig_data;
210 node->orig_data = NULL;
211 hw->df = NULL;
212 return ret;
213}
214
215static void gov_stop(struct devfreq *df)
216{
217 struct memlat_node *node = df->data;
218 struct memlat_hwmon *hw = node->hw;
219
220 sysfs_remove_group(&df->dev.kobj, node->attr_grp);
221 stop_monitor(df);
222 df->data = node->orig_data;
223 node->orig_data = NULL;
224 hw->df = NULL;
225}
226
/*
 * Governor decision function: sample the per-core HW counters and vote
 * for the device frequency mapped from the fastest core that appears
 * memory-latency bound.
 *
 * A core qualifies when its instructions-per-memory-access ratio is at or
 * below ratio_ceil AND its stall percentage is at or above stall_floor.
 * Always returns 0; the vote is written to *freq (0 = no vote).
 */
static int devfreq_memlat_get_freq(struct devfreq *df,
					unsigned long *freq)
{
	int i, lat_dev = 0;
	struct memlat_node *node = df->data;
	struct memlat_hwmon *hw = node->hw;
	unsigned long max_freq = 0;
	unsigned int ratio;

	/* Refresh hw->core_stats[] from the hardware counters. */
	hw->get_cnt(hw);

	for (i = 0; i < hw->num_cores; i++) {
		ratio = hw->core_stats[i].inst_count;

		/* Guard div-by-zero: with zero memory accesses the raw
		 * instruction count stands in as an effectively huge ratio. */
		if (hw->core_stats[i].mem_count)
			ratio /= hw->core_stats[i].mem_count;

		/* Skip cores with no measured frequency this sample. */
		if (!hw->core_stats[i].freq)
			continue;

		trace_memlat_dev_meas(dev_name(df->dev.parent),
					hw->core_stats[i].id,
					hw->core_stats[i].inst_count,
					hw->core_stats[i].mem_count,
					hw->core_stats[i].freq,
					hw->core_stats[i].stall_pct, ratio);

		/* Track the fastest latency-bound core. */
		if (ratio <= node->ratio_ceil
		    && hw->core_stats[i].stall_pct >= node->stall_floor
		    && hw->core_stats[i].freq > max_freq) {
			lat_dev = i;
			max_freq = hw->core_stats[i].freq;
		}
	}

	/* Translate the winning core freq into a device freq vote. */
	if (max_freq)
		max_freq = core_to_dev_freq(node, max_freq);

	/* Suppress back-to-back zero votes in the trace stream. */
	if (max_freq || !node->already_zero) {
		trace_memlat_dev_update(dev_name(df->dev.parent),
					hw->core_stats[lat_dev].id,
					hw->core_stats[lat_dev].inst_count,
					hw->core_stats[lat_dev].mem_count,
					hw->core_stats[lat_dev].freq,
					max_freq);
	}

	node->already_zero = !max_freq;

	*freq = max_freq;
	return 0;
}
279
Rohit Gupta870b1802016-04-13 16:55:04 -0700280gov_attr(ratio_ceil, 1U, 10000U);
Saravana Kannan83f28462017-09-26 19:45:15 -0700281gov_attr(stall_floor, 0U, 100U);
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700282
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700283static struct attribute *memlat_dev_attr[] = {
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700284 &dev_attr_ratio_ceil.attr,
Saravana Kannan83f28462017-09-26 19:45:15 -0700285 &dev_attr_stall_floor.attr,
David Keitel724116e2016-09-13 15:41:52 -0700286 &dev_attr_freq_map.attr,
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700287 NULL,
288};
289
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700290static struct attribute *compute_dev_attr[] = {
291 &dev_attr_freq_map.attr,
292 NULL,
293};
294
295static struct attribute_group memlat_dev_attr_group = {
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700296 .name = "mem_latency",
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700297 .attrs = memlat_dev_attr,
298};
299
300static struct attribute_group compute_dev_attr_group = {
301 .name = "compute",
302 .attrs = compute_dev_attr,
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700303};
304
305#define MIN_MS 10U
306#define MAX_MS 500U
307static int devfreq_memlat_ev_handler(struct devfreq *df,
308 unsigned int event, void *data)
309{
310 int ret;
311 unsigned int sample_ms;
312
313 switch (event) {
314 case DEVFREQ_GOV_START:
315 sample_ms = df->profile->polling_ms;
316 sample_ms = max(MIN_MS, sample_ms);
317 sample_ms = min(MAX_MS, sample_ms);
318 df->profile->polling_ms = sample_ms;
319
320 ret = gov_start(df);
321 if (ret)
322 return ret;
323
324 dev_dbg(df->dev.parent,
325 "Enabled Memory Latency governor\n");
326 break;
327
328 case DEVFREQ_GOV_STOP:
329 gov_stop(df);
330 dev_dbg(df->dev.parent,
331 "Disabled Memory Latency governor\n");
332 break;
333
334 case DEVFREQ_GOV_INTERVAL:
335 sample_ms = *(unsigned int *)data;
336 sample_ms = max(MIN_MS, sample_ms);
337 sample_ms = min(MAX_MS, sample_ms);
338 devfreq_interval_update(df, &sample_ms);
339 break;
340 }
341
342 return 0;
343}
344
static struct devfreq_governor devfreq_gov_memlat = {
	.name = "mem_latency",
	.get_target_freq = devfreq_memlat_get_freq,
	.event_handler = devfreq_memlat_ev_handler,
};

/* The "compute" governor reuses the memlat callbacks; it differs only in
 * the sysfs attribute group attached at registration time. */
static struct devfreq_governor devfreq_gov_compute = {
	.name = "compute",
	.get_target_freq = devfreq_memlat_get_freq,
	.event_handler = devfreq_memlat_ev_handler,
};
356
Rohit Gupta870b1802016-04-13 16:55:04 -0700357#define NUM_COLS 2
358static struct core_dev_map *init_core_dev_map(struct device *dev,
359 char *prop_name)
360{
361 int len, nf, i, j;
362 u32 data;
363 struct core_dev_map *tbl;
364 int ret;
365
366 if (!of_find_property(dev->of_node, prop_name, &len))
367 return NULL;
368 len /= sizeof(data);
369
370 if (len % NUM_COLS || len == 0)
371 return NULL;
372 nf = len / NUM_COLS;
373
374 tbl = devm_kzalloc(dev, (nf + 1) * sizeof(struct core_dev_map),
375 GFP_KERNEL);
376 if (!tbl)
377 return NULL;
378
379 for (i = 0, j = 0; i < nf; i++, j += 2) {
380 ret = of_property_read_u32_index(dev->of_node, prop_name, j,
381 &data);
382 if (ret)
383 return NULL;
384 tbl[i].core_mhz = data / 1000;
385
386 ret = of_property_read_u32_index(dev->of_node, prop_name, j + 1,
387 &data);
388 if (ret)
389 return NULL;
390 tbl[i].target_freq = data;
391 pr_debug("Entry%d CPU:%u, Dev:%u\n", i, tbl[i].core_mhz,
392 tbl[i].target_freq);
393 }
394 tbl[i].core_mhz = 0;
395
396 return tbl;
397}
398
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700399static struct memlat_node *register_common(struct device *dev,
400 struct memlat_hwmon *hw)
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700401{
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700402 struct memlat_node *node;
403
404 if (!hw->dev && !hw->of_node)
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700405 return ERR_PTR(-EINVAL);
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700406
407 node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
408 if (!node)
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700409 return ERR_PTR(-ENOMEM);
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700410
411 node->ratio_ceil = 10;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700412 node->hw = hw;
413
Rohit Gupta870b1802016-04-13 16:55:04 -0700414 hw->freq_map = init_core_dev_map(dev, "qcom,core-dev-table");
415 if (!hw->freq_map) {
416 dev_err(dev, "Couldn't find the core-dev freq table!\n");
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700417 return ERR_PTR(-EINVAL);
Rohit Gupta870b1802016-04-13 16:55:04 -0700418 }
419
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700420 mutex_lock(&list_lock);
421 list_add_tail(&node->list, &memlat_list);
422 mutex_unlock(&list_lock);
423
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700424 return node;
425}
426
427int register_compute(struct device *dev, struct memlat_hwmon *hw)
428{
429 struct memlat_node *node;
430 int ret = 0;
431
432 node = register_common(dev, hw);
433 if (IS_ERR(node)) {
434 ret = PTR_ERR(node);
435 goto out;
436 }
437
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700438 mutex_lock(&state_lock);
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700439 node->gov = &devfreq_gov_compute;
440 node->attr_grp = &compute_dev_attr_group;
441
442 if (!compute_use_cnt)
443 ret = devfreq_add_governor(&devfreq_gov_compute);
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700444 if (!ret)
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700445 compute_use_cnt++;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700446 mutex_unlock(&state_lock);
447
Jonathan Avilae71f95f2017-10-12 15:15:47 -0700448out:
449 if (!ret)
450 dev_info(dev, "Compute governor registered.\n");
451 else
452 dev_err(dev, "Compute governor registration failed!\n");
453
454 return ret;
455}
456
457int register_memlat(struct device *dev, struct memlat_hwmon *hw)
458{
459 struct memlat_node *node;
460 int ret = 0;
461
462 node = register_common(dev, hw);
463 if (IS_ERR(node)) {
464 ret = PTR_ERR(node);
465 goto out;
466 }
467
468 mutex_lock(&state_lock);
469 node->gov = &devfreq_gov_memlat;
470 node->attr_grp = &memlat_dev_attr_group;
471
472 if (!memlat_use_cnt)
473 ret = devfreq_add_governor(&devfreq_gov_memlat);
474 if (!ret)
475 memlat_use_cnt++;
476 mutex_unlock(&state_lock);
477
478out:
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700479 if (!ret)
480 dev_info(dev, "Memory Latency governor registered.\n");
481 else
482 dev_err(dev, "Memory Latency governor registration failed!\n");
483
484 return ret;
485}
486
487MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
488MODULE_LICENSE("GPL v2");