blob: d1538dd0e5582c9cc7e856dcd009f2861af5cdd0 [file] [log] [blame]
Venkat Devarasetty1633a48b2013-02-14 21:15:03 +05301/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
Narayanan Gopalakrishnan85dd2b82012-08-24 16:10:31 -070014 * Qualcomm MSM Runqueue Stats and cpu utilization Interface for Userspace
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070016#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/hrtimer.h>
20#include <linux/cpu.h>
21#include <linux/kobject.h>
22#include <linux/sysfs.h>
23#include <linux/notifier.h>
24#include <linux/slab.h>
25#include <linux/workqueue.h>
26#include <linux/sched.h>
27#include <linux/spinlock.h>
Amar Singhal0a3dc882011-10-20 14:39:14 -070028#include <linux/rq_stats.h>
Narayanan Gopalakrishnan85dd2b82012-08-24 16:10:31 -070029#include <linux/cpufreq.h>
30#include <linux/kernel_stat.h>
31#include <linux/tick.h>
Krishna Vanka7f563ff2012-03-20 22:04:19 +053032#include <asm/smp_plat.h>
Venkat Devarasetty1633a48b2013-02-14 21:15:03 +053033#include "acpuclock.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070034
Anurag Singh6e54bc62011-10-25 12:36:38 -070035#define MAX_LONG_SIZE 24
Amar Singhal0a3dc882011-10-20 14:39:14 -070036#define DEFAULT_RQ_POLL_JIFFIES 1
37#define DEFAULT_DEF_TIMER_JIFFIES 5
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070038
/* Notifier hooks filled in and registered by msm_rq_stats_init() */
struct notifier_block freq_transition;
struct notifier_block cpu_hotplug;

/*
 * Per-cpu bookkeeping used to compute a load figure normalized to the
 * cpu's maximum frequency over a sampling window.
 */
struct cpu_load_data {
	cputime64_t prev_cpu_idle;	/* idle time at the previous sample */
	cputime64_t prev_cpu_wall;	/* wall time at the previous sample */
	cputime64_t prev_cpu_iowait;	/* iowait time at the previous sample */
	unsigned int avg_load_maxfreq;	/* weighted avg load scaled to policy_max */
	unsigned int samples;		/* NOTE(review): never written in this file */
	unsigned int window_size;	/* total wall time folded into the average */
	unsigned int cur_freq;		/* last known cpu frequency */
	unsigned int policy_max;	/* cpuinfo.max_freq for this cpu's policy */
	cpumask_var_t related_cpus;	/* cpus sharing this cpu's cpufreq policy */
	struct mutex cpu_load_mutex;	/* protects the fields above */
};

static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
56
57static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
58{
59 u64 idle_time;
60 u64 cur_wall_time;
61 u64 busy_time;
62
63 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
64
65 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
66 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
67 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
68 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
69 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
70 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
71
72 idle_time = cur_wall_time - busy_time;
73 if (wall)
74 *wall = jiffies_to_usecs(cur_wall_time);
75
76 return jiffies_to_usecs(idle_time);
77}
78
79static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
80{
81 u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
82
83 if (idle_time == -1ULL)
84 return get_cpu_idle_time_jiffy(cpu, wall);
85 else
86 idle_time += get_cpu_iowait_time_us(cpu, wall);
87
88 return idle_time;
89}
90
91static inline cputime64_t get_cpu_iowait_time(unsigned int cpu,
92 cputime64_t *wall)
93{
94 u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
95
96 if (iowait_time == -1ULL)
97 return 0;
98
99 return iowait_time;
100}
101
/*
 * Fold one load sample for @cpu into its per-window weighted average.
 *
 * Computes the idle/iowait/wall deltas since the previous call (updating
 * the prev_* snapshots as it goes), derives the busy percentage, scales
 * it by @freq relative to policy_max, and merges it into avg_load_maxfreq
 * weighted by each sample's wall-time span.  Caller must hold the cpu's
 * cpu_load_mutex.  Always returns 0.
 */
static int update_average_load(unsigned int freq, unsigned int cpu)
{

	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
	unsigned int idle_time, wall_time, iowait_time;
	unsigned int cur_load, load_at_max_freq;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
	cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
	pcpu->prev_cpu_iowait = cur_iowait_time;

	/* idle already includes iowait; count iowait as busy time */
	if (idle_time >= iowait_time)
		idle_time -= iowait_time;

	/* zero or inconsistent window: skip this sample */
	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	cur_load = 100 * (wall_time - idle_time) / wall_time;

	/* Calculate the scaled load across CPU */
	load_at_max_freq = (cur_load * freq) / pcpu->policy_max;

	if (!pcpu->avg_load_maxfreq) {
		/* This is the first sample in this window*/
		pcpu->avg_load_maxfreq = load_at_max_freq;
		pcpu->window_size = wall_time;
	} else {
		/*
		 * There is already a sample available in this window.
		 * Compute weighted average with prev entry, so that we get
		 * the precise weighted load.
		 */
		pcpu->avg_load_maxfreq =
			((pcpu->avg_load_maxfreq * pcpu->window_size) +
			(load_at_max_freq * wall_time)) /
			(wall_time + pcpu->window_size);

		pcpu->window_size += wall_time;
	}

	return 0;
}
153
154static unsigned int report_load_at_max_freq(void)
155{
156 int cpu;
157 struct cpu_load_data *pcpu;
158 unsigned int total_load = 0;
159
160 for_each_online_cpu(cpu) {
161 pcpu = &per_cpu(cpuload, cpu);
162 mutex_lock(&pcpu->cpu_load_mutex);
163 update_average_load(pcpu->cur_freq, cpu);
164 total_load += pcpu->avg_load_maxfreq;
165 pcpu->avg_load_maxfreq = 0;
166 mutex_unlock(&pcpu->cpu_load_mutex);
167 }
168 return total_load;
169}
170
171static int cpufreq_transition_handler(struct notifier_block *nb,
172 unsigned long val, void *data)
173{
174 struct cpufreq_freqs *freqs = data;
175 struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
176 int j;
177
178 switch (val) {
179 case CPUFREQ_POSTCHANGE:
180 for_each_cpu(j, this_cpu->related_cpus) {
181 struct cpu_load_data *pcpu = &per_cpu(cpuload, j);
182 mutex_lock(&pcpu->cpu_load_mutex);
183 update_average_load(freqs->old, freqs->cpu);
184 pcpu->cur_freq = freqs->new;
185 mutex_unlock(&pcpu->cpu_load_mutex);
186 }
187 break;
188 }
189 return 0;
190}
191
/*
 * Hotplug callback: when a cpu comes online, (re)learn its current
 * frequency if unknown and clear any stale accumulated load window.
 */
static int cpu_hotplug_handler(struct notifier_block *nb,
			unsigned long val, void *data)
{
	unsigned int cpu = (unsigned long)data;
	struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);

	switch (val) {
	case CPU_ONLINE:
		if (!this_cpu->cur_freq)
			this_cpu->cur_freq = acpuclk_get_rate(cpu);
		/* fallthrough: both online paths reset the load window */
	case CPU_ONLINE_FROZEN:
		this_cpu->avg_load_maxfreq = 0;
	}

	return NOTIFY_OK;
}
208
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700209static void def_work_fn(struct work_struct *work)
210{
211 int64_t diff;
212
213 diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
214 do_div(diff, 1000 * 1000);
215 rq_info.def_interval = (unsigned int) diff;
216
217 /* Notify polling threads on change of value */
218 sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
219}
220
Stephen Boyd671ee192012-07-03 14:33:25 -0700221static ssize_t run_queue_avg_show(struct kobject *kobj,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700222 struct kobj_attribute *attr, char *buf)
223{
224 unsigned int val = 0;
225 unsigned long flags = 0;
226
227 spin_lock_irqsave(&rq_lock, flags);
228 /* rq avg currently available only on one core */
229 val = rq_info.rq_avg;
230 rq_info.rq_avg = 0;
231 spin_unlock_irqrestore(&rq_lock, flags);
232
Praveen Chidambaram2b0fdd02011-10-28 16:40:58 -0600233 return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700234}
235
/* sysfs attribute: rq-stats/run_queue_avg (read-only) */
static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
237
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700238static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
Amar Singhal0a3dc882011-10-20 14:39:14 -0700239 struct kobj_attribute *attr, char *buf)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700240{
241 int ret = 0;
242 unsigned long flags = 0;
243
244 spin_lock_irqsave(&rq_lock, flags);
Amar Singhal0a3dc882011-10-20 14:39:14 -0700245 ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
246 jiffies_to_msecs(rq_info.rq_poll_jiffies));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700247 spin_unlock_irqrestore(&rq_lock, flags);
248
249 return ret;
250}
251
252static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
Amar Singhal0a3dc882011-10-20 14:39:14 -0700253 struct kobj_attribute *attr,
254 const char *buf, size_t count)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255{
Amar Singhal0a3dc882011-10-20 14:39:14 -0700256 unsigned int val = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700257 unsigned long flags = 0;
258 static DEFINE_MUTEX(lock_poll_ms);
259
260 mutex_lock(&lock_poll_ms);
261
262 spin_lock_irqsave(&rq_lock, flags);
263 sscanf(buf, "%u", &val);
Amar Singhal0a3dc882011-10-20 14:39:14 -0700264 rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265 spin_unlock_irqrestore(&rq_lock, flags);
266
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700267 mutex_unlock(&lock_poll_ms);
268
269 return count;
270}
271
/* sysfs attribute: rq-stats/run_queue_poll_ms (root read/write) */
static struct kobj_attribute run_queue_poll_ms_attr =
	__ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
			store_run_queue_poll_ms);
275
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700276static ssize_t show_def_timer_ms(struct kobject *kobj,
277 struct kobj_attribute *attr, char *buf)
278{
Amar Singhal6ea86a12011-12-13 15:44:01 -0800279 return snprintf(buf, MAX_LONG_SIZE, "%u\n", rq_info.def_interval);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700280}
281
282static ssize_t store_def_timer_ms(struct kobject *kobj,
283 struct kobj_attribute *attr, const char *buf, size_t count)
284{
285 unsigned int val = 0;
286
287 sscanf(buf, "%u", &val);
Amar Singhal0a3dc882011-10-20 14:39:14 -0700288 rq_info.def_timer_jiffies = msecs_to_jiffies(val);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700289
Amar Singhal0a3dc882011-10-20 14:39:14 -0700290 rq_info.def_start_time = ktime_to_ns(ktime_get());
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700291 return count;
292}
293
/* sysfs attribute: rq-stats/def_timer_ms (root read/write) */
static struct kobj_attribute def_timer_ms_attr =
	__ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
			store_def_timer_ms);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700297
Narayanan Gopalakrishnan85dd2b82012-08-24 16:10:31 -0700298static ssize_t show_cpu_normalized_load(struct kobject *kobj,
299 struct kobj_attribute *attr, char *buf)
300{
301 return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
302}
303
304static struct kobj_attribute cpu_normalized_load_attr =
305 __ATTR(cpu_normalized_load, S_IWUSR | S_IRUSR, show_cpu_normalized_load,
306 NULL);
307
/* Attributes published under /sys/devices/system/cpu/cpu0/rq-stats/ */
static struct attribute *rq_attrs[] = {
	&cpu_normalized_load_attr.attr,
	&def_timer_ms_attr.attr,
	&run_queue_avg_attr.attr,
	&run_queue_poll_ms_attr.attr,
	NULL,
};

static struct attribute_group rq_attr_group = {
	.attrs = rq_attrs,
};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700319
320static int init_rq_attribs(void)
321{
Stephen Boyd671ee192012-07-03 14:33:25 -0700322 int err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700323
324 rq_info.rq_avg = 0;
Stephen Boyd671ee192012-07-03 14:33:25 -0700325 rq_info.attr_group = &rq_attr_group;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700326
327 /* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
328 rq_info.kobj = kobject_create_and_add("rq-stats",
Stephen Boyd671ee192012-07-03 14:33:25 -0700329 &get_cpu_device(0)->kobj);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700330 if (!rq_info.kobj)
Stephen Boyd671ee192012-07-03 14:33:25 -0700331 return -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700332
333 err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
334 if (err)
335 kobject_put(rq_info.kobj);
336 else
337 kobject_uevent(rq_info.kobj, KOBJ_ADD);
338
Stephen Boyd671ee192012-07-03 14:33:25 -0700339 return err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700340}
341
342static int __init msm_rq_stats_init(void)
343{
Jin Hong869d6a62011-12-01 13:37:36 -0800344 int ret;
Narayanan Gopalakrishnan85dd2b82012-08-24 16:10:31 -0700345 int i;
346 struct cpufreq_policy cpu_policy;
Krishna Vanka7f563ff2012-03-20 22:04:19 +0530347 /* Bail out if this is not an SMP Target */
348 if (!is_smp()) {
349 rq_info.init = 0;
350 return -ENOSYS;
351 }
352
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700353 rq_wq = create_singlethread_workqueue("rq_stats");
354 BUG_ON(!rq_wq);
Amar Singhal0a3dc882011-10-20 14:39:14 -0700355 INIT_WORK(&rq_info.def_timer_work, def_work_fn);
356 spin_lock_init(&rq_lock);
357 rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
358 rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
359 rq_info.rq_poll_last_jiffy = 0;
360 rq_info.def_timer_last_jiffy = 0;
Jin Hong869d6a62011-12-01 13:37:36 -0800361 ret = init_rq_attribs();
362
Amar Singhal0a3dc882011-10-20 14:39:14 -0700363 rq_info.init = 1;
Narayanan Gopalakrishnan85dd2b82012-08-24 16:10:31 -0700364
365 for_each_possible_cpu(i) {
366 struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
367 mutex_init(&pcpu->cpu_load_mutex);
368 cpufreq_get_policy(&cpu_policy, i);
369 pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
Venkat Devarasetty1633a48b2013-02-14 21:15:03 +0530370 if (cpu_online(i))
371 pcpu->cur_freq = acpuclk_get_rate(i);
Narayanan Gopalakrishnan85dd2b82012-08-24 16:10:31 -0700372 cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
373 }
374 freq_transition.notifier_call = cpufreq_transition_handler;
375 cpu_hotplug.notifier_call = cpu_hotplug_handler;
376 cpufreq_register_notifier(&freq_transition,
377 CPUFREQ_TRANSITION_NOTIFIER);
378 register_hotcpu_notifier(&cpu_hotplug);
379
Jin Hong869d6a62011-12-01 13:37:36 -0800380 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700381}
382late_initcall(msm_rq_stats_init);