blob: 9943c8c75fc8b947dd08ed0de9dfaf1b5f857936 [file] [log] [blame]
Rohit Gupta5e4358c2014-07-18 16:16:02 -07001/*
Rohit Gupta3862a342017-03-07 11:47:52 -08002 * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
Rohit Gupta5e4358c2014-07-18 16:16:02 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#define pr_fmt(fmt) "arm-memlat-mon: " fmt
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/io.h>
20#include <linux/delay.h>
21#include <linux/err.h>
22#include <linux/errno.h>
23#include <linux/interrupt.h>
24#include <linux/platform_device.h>
25#include <linux/of.h>
26#include <linux/of_irq.h>
27#include <linux/slab.h>
28#include <linux/irq.h>
29#include <linux/cpu_pm.h>
30#include <linux/cpu.h>
31#include "governor.h"
32#include "governor_memlat.h"
33#include <linux/perf_event.h>
34
/*
 * Indices into cpu_pmu_stats.events[] / cpu_grp_info.event_ids[] for
 * each monitored PMU counter.
 */
enum ev_index {
	INST_IDX,	/* instructions event */
	CM_IDX,		/* cache-miss event */
	CYC_IDX,	/* CPU cycle event */
	STALL_CYC_IDX,	/* stall-cycle event (optional; id may be 0) */
	NUM_EVENTS
};
/* Default raw PMU event codes, used when not overridden via device tree. */
#define INST_EV 0x08	/* instructions */
#define L2DM_EV 0x17	/* L2 data cache miss */
#define CYC_EV 0x11	/* CPU cycles */
45
/*
 * One kernel perf counter plus the last raw total read from it, so
 * read_event() can return per-interval deltas.
 */
struct event_data {
	struct perf_event *pevent;
	unsigned long prev_count;	/* raw total at the previous read */
};
50
Rohit Guptabb3ac622017-05-03 17:36:54 -070051struct cpu_pmu_stats {
Rohit Gupta5e4358c2014-07-18 16:16:02 -070052 struct event_data events[NUM_EVENTS];
53 ktime_t prev_ts;
Rohit Gupta5e4358c2014-07-18 16:16:02 -070054};
Rohit Gupta5e4358c2014-07-18 16:16:02 -070055
/*
 * State for one monitored group of CPUs, embedding the memlat_hwmon
 * handed to the governor.
 */
struct cpu_grp_info {
	cpumask_t cpus;			/* CPUs this monitor covers */
	cpumask_t inited_cpus;		/* subset with perf events created */
	unsigned int event_ids[NUM_EVENTS];	/* raw event codes; 0 = unused */
	struct cpu_pmu_stats *cpustats;	/* array, one slot per CPU in @cpus */
	struct memlat_hwmon hw;
	struct notifier_block arm_memlat_cpu_notif;	/* hotplug callback */
	struct list_head mon_list;	/* entry on memlat_mon_list */
};
65
Rohit Guptabb3ac622017-05-03 17:36:54 -070066#define to_cpustats(cpu_grp, cpu) \
67 (&cpu_grp->cpustats[cpu - cpumask_first(&cpu_grp->cpus)])
68#define to_devstats(cpu_grp, cpu) \
69 (&cpu_grp->hw.core_stats[cpu - cpumask_first(&cpu_grp->cpus)])
70#define to_cpu_grp(hwmon) container_of(hwmon, struct cpu_grp_info, hw)
71
/* Monitors that still have offline CPUs awaiting perf-event setup. */
static LIST_HEAD(memlat_mon_list);
/* Protects memlat_mon_list and each group's membership on it. */
static DEFINE_MUTEX(list_lock);
74
75static unsigned long compute_freq(struct cpu_pmu_stats *cpustats,
Rohit Gupta5e4358c2014-07-18 16:16:02 -070076 unsigned long cyc_cnt)
77{
78 ktime_t ts;
79 unsigned int diff;
80 unsigned long freq = 0;
81
82 ts = ktime_get();
Rohit Guptabb3ac622017-05-03 17:36:54 -070083 diff = ktime_to_us(ktime_sub(ts, cpustats->prev_ts));
Rohit Gupta5e4358c2014-07-18 16:16:02 -070084 if (!diff)
85 diff = 1;
Rohit Guptabb3ac622017-05-03 17:36:54 -070086 cpustats->prev_ts = ts;
Rohit Gupta5e4358c2014-07-18 16:16:02 -070087 freq = cyc_cnt;
88 do_div(freq, diff);
89
90 return freq;
91}
92
93#define MAX_COUNT_LIM 0xFFFFFFFFFFFFFFFF
94static inline unsigned long read_event(struct event_data *event)
95{
96 unsigned long ev_count;
97 u64 total, enabled, running;
98
99 total = perf_event_read_value(event->pevent, &enabled, &running);
Saravana Kannan62e41b42017-09-26 19:16:22 -0700100 ev_count = total - event->prev_count;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700101 event->prev_count = total;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700102 return ev_count;
103}
104
105static void read_perf_counters(int cpu, struct cpu_grp_info *cpu_grp)
106{
Rohit Guptabb3ac622017-05-03 17:36:54 -0700107 struct cpu_pmu_stats *cpustats = to_cpustats(cpu_grp, cpu);
108 struct dev_stats *devstats = to_devstats(cpu_grp, cpu);
Saravana Kannan83f28462017-09-26 19:45:15 -0700109 unsigned long cyc_cnt, stall_cnt;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700110
Rohit Guptabb3ac622017-05-03 17:36:54 -0700111 devstats->inst_count = read_event(&cpustats->events[INST_IDX]);
112 devstats->mem_count = read_event(&cpustats->events[CM_IDX]);
113 cyc_cnt = read_event(&cpustats->events[CYC_IDX]);
114 devstats->freq = compute_freq(cpustats, cyc_cnt);
Saravana Kannan83f28462017-09-26 19:45:15 -0700115 if (cpustats->events[STALL_CYC_IDX].pevent) {
116 stall_cnt = read_event(&cpustats->events[STALL_CYC_IDX]);
117 stall_cnt = min(stall_cnt, cyc_cnt);
118 devstats->stall_pct = mult_frac(100, stall_cnt, cyc_cnt);
119 } else {
120 devstats->stall_pct = 100;
121 }
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700122}
123
124static unsigned long get_cnt(struct memlat_hwmon *hw)
125{
126 int cpu;
Rohit Guptabb3ac622017-05-03 17:36:54 -0700127 struct cpu_grp_info *cpu_grp = to_cpu_grp(hw);
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700128
Rohit Guptabb3ac622017-05-03 17:36:54 -0700129 for_each_cpu(cpu, &cpu_grp->inited_cpus)
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700130 read_perf_counters(cpu, cpu_grp);
131
132 return 0;
133}
134
Rohit Guptabb3ac622017-05-03 17:36:54 -0700135static void delete_events(struct cpu_pmu_stats *cpustats)
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700136{
137 int i;
138
Saravana Kannan52978b92017-09-26 20:29:47 -0700139 for (i = 0; i < ARRAY_SIZE(cpustats->events); i++) {
Rohit Guptabb3ac622017-05-03 17:36:54 -0700140 cpustats->events[i].prev_count = 0;
Saravana Kannan83f28462017-09-26 19:45:15 -0700141 if (cpustats->events[i].pevent) {
142 perf_event_release_kernel(cpustats->events[i].pevent);
143 cpustats->events[i].pevent = NULL;
144 }
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700145 }
146}
147
/*
 * Governor callback: tear down monitoring for the whole group.
 * Releases perf events on every initialized CPU, zeroes the stats the
 * governor reads, drops the group from the pending-hotplug list, and
 * finally unregisters the hotplug notifier.
 */
static void stop_hwmon(struct memlat_hwmon *hw)
{
	int cpu;
	struct cpu_grp_info *cpu_grp = to_cpu_grp(hw);
	struct dev_stats *devstats;

	/* Hold off hotplug so inited_cpus can't change under us. */
	get_online_cpus();
	for_each_cpu(cpu, &cpu_grp->inited_cpus) {
		delete_events(to_cpustats(cpu_grp, cpu));

		/* Clear governor data */
		devstats = to_devstats(cpu_grp, cpu);
		devstats->inst_count = 0;
		devstats->mem_count = 0;
		devstats->freq = 0;
		devstats->stall_pct = 0;
	}
	/*
	 * The group is on memlat_mon_list only while some CPUs were still
	 * uninitialized (see start_hwmon/the notifier); remove it then.
	 */
	mutex_lock(&list_lock);
	if (!cpumask_equal(&cpu_grp->cpus, &cpu_grp->inited_cpus))
		list_del(&cpu_grp->mon_list);
	mutex_unlock(&list_lock);
	cpumask_clear(&cpu_grp->inited_cpus);

	put_online_cpus();

	unregister_cpu_notifier(&cpu_grp->arm_memlat_cpu_notif);
}
175
176static struct perf_event_attr *alloc_attr(void)
177{
178 struct perf_event_attr *attr;
179
180 attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
181 if (!attr)
Rohit Guptabb3ac622017-05-03 17:36:54 -0700182 return attr;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700183
184 attr->type = PERF_TYPE_RAW;
185 attr->size = sizeof(struct perf_event_attr);
186 attr->pinned = 1;
187 attr->exclude_idle = 1;
188
189 return attr;
190}
191
Rohit Guptabb3ac622017-05-03 17:36:54 -0700192static int set_events(struct cpu_grp_info *cpu_grp, int cpu)
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700193{
194 struct perf_event *pevent;
195 struct perf_event_attr *attr;
Saravana Kannan52978b92017-09-26 20:29:47 -0700196 int err, i;
Saravana Kannan83f28462017-09-26 19:45:15 -0700197 unsigned int event_id;
Rohit Guptabb3ac622017-05-03 17:36:54 -0700198 struct cpu_pmu_stats *cpustats = to_cpustats(cpu_grp, cpu);
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700199
200 /* Allocate an attribute for event initialization */
201 attr = alloc_attr();
Rohit Guptabb3ac622017-05-03 17:36:54 -0700202 if (!attr)
203 return -ENOMEM;
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700204
Saravana Kannan52978b92017-09-26 20:29:47 -0700205 for (i = 0; i < ARRAY_SIZE(cpustats->events); i++) {
Saravana Kannan83f28462017-09-26 19:45:15 -0700206 event_id = cpu_grp->event_ids[i];
207 if (!event_id)
208 continue;
209
210 attr->config = event_id;
Saravana Kannan52978b92017-09-26 20:29:47 -0700211 pevent = perf_event_create_kernel_counter(attr, cpu, NULL,
212 NULL, NULL);
213 if (IS_ERR(pevent))
214 goto err_out;
215 cpustats->events[i].pevent = pevent;
216 perf_event_enable(pevent);
217 }
Rohit Gupta5e4358c2014-07-18 16:16:02 -0700218
219 kfree(attr);
220 return 0;
221
222err_out:
223 err = PTR_ERR(pevent);
224 kfree(attr);
225 return err;
226}
227
/*
 * CPU hotplug notifier: when a CPU comes online, finish perf-event
 * setup for any monitor that covers it but could not initialize it
 * earlier (the CPU was offline during start_hwmon). Once every CPU in
 * a group is initialized, the group leaves the pending list.
 */
static int arm_memlat_cpu_callback(struct notifier_block *nb,
		unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct cpu_grp_info *cpu_grp, *tmp;

	if (action != CPU_ONLINE)
		return NOTIFY_OK;

	mutex_lock(&list_lock);
	list_for_each_entry_safe(cpu_grp, tmp, &memlat_mon_list, mon_list) {
		/* Skip monitors that don't cover, or already init'ed, this CPU. */
		if (!cpumask_test_cpu(cpu, &cpu_grp->cpus) ||
		    cpumask_test_cpu(cpu, &cpu_grp->inited_cpus))
			continue;
		if (set_events(cpu_grp, cpu))
			pr_warn("Failed to create perf ev for CPU%lu\n", cpu);
		else
			cpumask_set_cpu(cpu, &cpu_grp->inited_cpus);
		/* Fully initialized groups no longer need notifications. */
		if (cpumask_equal(&cpu_grp->cpus, &cpu_grp->inited_cpus))
			list_del(&cpu_grp->mon_list);
	}
	mutex_unlock(&list_lock);

	return NOTIFY_OK;
}
253
/*
 * Governor callback: begin monitoring the group. Creates perf events
 * on every online CPU in the mask; CPUs that are currently offline are
 * deferred to the hotplug notifier (the group is queued on
 * memlat_mon_list until all of its CPUs are initialized).
 *
 * Returns 0 on success, or a negative errno if event creation fails on
 * an online CPU. NOTE(review): on that failure path the notifier stays
 * registered and already-inited CPUs keep their events — presumably the
 * governor reacts by calling stop_hwmon(); confirm against the caller.
 */
static int start_hwmon(struct memlat_hwmon *hw)
{
	int cpu, ret = 0;
	struct cpu_grp_info *cpu_grp = to_cpu_grp(hw);

	register_cpu_notifier(&cpu_grp->arm_memlat_cpu_notif);

	get_online_cpus();
	for_each_cpu(cpu, &cpu_grp->cpus) {
		ret = set_events(cpu_grp, cpu);
		if (ret) {
			/* Offline CPU: not an error, init later on hotplug. */
			if (!cpu_online(cpu)) {
				ret = 0;
			} else {
				pr_warn("Perf event init failed on CPU%d\n",
					cpu);
				break;
			}
		} else {
			cpumask_set_cpu(cpu, &cpu_grp->inited_cpus);
		}
	}
	/* Queue for the notifier while any covered CPU is uninitialized. */
	mutex_lock(&list_lock);
	if (!cpumask_equal(&cpu_grp->cpus, &cpu_grp->inited_cpus))
		list_add_tail(&cpu_grp->mon_list, &memlat_mon_list);
	mutex_unlock(&list_lock);

	put_online_cpus();

	return ret;
}
285
286static int get_mask_from_dev_handle(struct platform_device *pdev,
287 cpumask_t *mask)
288{
289 struct device *dev = &pdev->dev;
290 struct device_node *dev_phandle;
291 struct device *cpu_dev;
292 int cpu, i = 0;
293 int ret = -ENOENT;
294
295 dev_phandle = of_parse_phandle(dev->of_node, "qcom,cpulist", i++);
296 while (dev_phandle) {
297 for_each_possible_cpu(cpu) {
298 cpu_dev = get_cpu_device(cpu);
299 if (cpu_dev && cpu_dev->of_node == dev_phandle) {
300 cpumask_set_cpu(cpu, mask);
301 ret = 0;
302 break;
303 }
304 }
305 dev_phandle = of_parse_phandle(dev->of_node,
306 "qcom,cpulist", i++);
307 }
308
309 return ret;
310}
311
/*
 * Probe: build a cpu_grp_info from device tree properties and register
 * it with the memlat governor.
 *
 * DT inputs: "qcom,target-dev" (the device whose frequency is scaled),
 * "qcom,cpulist" (CPUs to monitor), and optional raw event overrides
 * "qcom,cachemiss-ev", "qcom,inst-ev", "qcom,stall-cycle-ev".
 */
static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct memlat_hwmon *hw;
	struct cpu_grp_info *cpu_grp;
	int cpu, ret;
	u32 event_id;

	cpu_grp = devm_kzalloc(dev, sizeof(*cpu_grp), GFP_KERNEL);
	if (!cpu_grp)
		return -ENOMEM;
	cpu_grp->arm_memlat_cpu_notif.notifier_call = arm_memlat_cpu_callback;
	hw = &cpu_grp->hw;

	hw->dev = dev;
	/* NOTE(review): this of_node reference is held for the device's life. */
	hw->of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
	if (!hw->of_node) {
		dev_err(dev, "Couldn't find a target device\n");
		return -ENODEV;
	}

	if (get_mask_from_dev_handle(pdev, &cpu_grp->cpus)) {
		dev_err(dev, "CPU list is empty\n");
		return -ENODEV;
	}

	/* Per-CPU stats arrays, one slot per CPU in the mask. */
	hw->num_cores = cpumask_weight(&cpu_grp->cpus);
	hw->core_stats = devm_kzalloc(dev, hw->num_cores *
			sizeof(*(hw->core_stats)), GFP_KERNEL);
	if (!hw->core_stats)
		return -ENOMEM;

	cpu_grp->cpustats = devm_kzalloc(dev, hw->num_cores *
			sizeof(*(cpu_grp->cpustats)), GFP_KERNEL);
	if (!cpu_grp->cpustats)
		return -ENOMEM;

	/* Cycle event is fixed; the others may be overridden from DT. */
	cpu_grp->event_ids[CYC_IDX] = CYC_EV;

	ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
				   &event_id);
	if (ret) {
		dev_dbg(dev, "Cache Miss event not specified. Using def:0x%x\n",
			L2DM_EV);
		event_id = L2DM_EV;
	}
	cpu_grp->event_ids[CM_IDX] = event_id;

	ret = of_property_read_u32(dev->of_node, "qcom,inst-ev", &event_id);
	if (ret) {
		dev_dbg(dev, "Inst event not specified. Using def:0x%x\n",
			INST_EV);
		event_id = INST_EV;
	}
	cpu_grp->event_ids[INST_IDX] = event_id;

	/* Stall event is optional; 0 in event_ids[] means "skip it". */
	ret = of_property_read_u32(dev->of_node, "qcom,stall-cycle-ev",
				   &event_id);
	if (ret)
		dev_dbg(dev, "Stall cycle event not specified. Event ignored.\n");
	else
		cpu_grp->event_ids[STALL_CYC_IDX] = event_id;

	for_each_cpu(cpu, &cpu_grp->cpus)
		to_devstats(cpu_grp, cpu)->id = cpu;

	hw->start_hwmon = &start_hwmon;
	hw->stop_hwmon = &stop_hwmon;
	hw->get_cnt = &get_cnt;

	ret = register_memlat(dev, hw);
	if (ret) {
		pr_err("Mem Latency Gov registration failed\n");
		return ret;
	}

	return 0;
}
390
/* Device tree match: binds this driver to "qcom,arm-memlat-mon" nodes. */
static const struct of_device_id memlat_match_table[] = {
	{ .compatible = "qcom,arm-memlat-mon" },
	{}
};
395
396static struct platform_driver arm_memlat_mon_driver = {
397 .probe = arm_memlat_mon_driver_probe,
398 .driver = {
399 .name = "arm-memlat-mon",
400 .of_match_table = memlat_match_table,
401 },
402};
403
404module_platform_driver(arm_memlat_mon_driver);