/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
/*
 * Qualcomm MSM runqueue stats and CPU utilization interface for userspace
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/cpu.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/rq_stats.h>
#include <linux/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/tick.h>
#include <linux/suspend.h>
#include <asm/smp_plat.h>
#include "acpuclock.h"

#define MAX_LONG_SIZE 24
#define DEFAULT_RQ_POLL_JIFFIES 1
#define DEFAULT_DEF_TIMER_JIFFIES 5

static struct notifier_block freq_transition;
static struct notifier_block cpu_hotplug;

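/*
 * Per-CPU state for the normalized-load calculation: snapshots of the
 * previous idle/wall/iowait times, the running frequency-normalized
 * average and the wall-time window it covers, plus the CPU's current
 * and policy-maximum frequencies.
 */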
struct cpu_load_data {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_iowait;
	unsigned int avg_load_maxfreq;
	unsigned int samples;
	unsigned int window_size;
	unsigned int cur_freq;
	unsigned int policy_max;
	cpumask_var_t related_cpus;
	struct mutex cpu_load_mutex;
};

static DEFINE_PER_CPU(struct cpu_load_data, cpuload);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

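/*
 * Prefer the NO_HZ microsecond idle accounting; if it is unavailable
 * (get_cpu_idle_time_us() returns -1ULL), fall back to the
 * jiffies-based cpustat derivation above.
 */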
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu,
					      cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

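/*
 * update_average_load() below samples this CPU's busy time since the
 * previous call and folds it into a frequency-normalized running
 * average.  A worked example, with purely illustrative numbers: a CPU
 * that was 50% busy while running at 800 MHz under a 1600 MHz policy
 * maximum contributes load_at_max_freq = (50 * 800000) / 1600000 = 25,
 * i.e. the same work would have kept the CPU only 25% busy at its
 * maximum frequency.
 */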
static int update_average_load(unsigned int freq, unsigned int cpu)
{
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
	unsigned int idle_time, wall_time, iowait_time;
	unsigned int cur_load, load_at_max_freq;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
	cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
	pcpu->prev_cpu_iowait = cur_iowait_time;

	/* Treat time spent waiting on I/O as busy time. */
	if (idle_time >= iowait_time)
		idle_time -= iowait_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	cur_load = 100 * (wall_time - idle_time) / wall_time;

	/* Scale the load to what it would be at the CPU's maximum frequency */
	load_at_max_freq = (cur_load * freq) / pcpu->policy_max;

	if (!pcpu->avg_load_maxfreq) {
		/* This is the first sample in this window */
		pcpu->avg_load_maxfreq = load_at_max_freq;
		pcpu->window_size = wall_time;
	} else {
		/*
		 * There is already a sample available in this window.
		 * Compute the average weighted by each sample's wall
		 * time, so that we get the precise weighted load.
		 */
		pcpu->avg_load_maxfreq =
			((pcpu->avg_load_maxfreq * pcpu->window_size) +
			(load_at_max_freq * wall_time)) /
			(wall_time + pcpu->window_size);

		pcpu->window_size += wall_time;
	}

	return 0;
}

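/*
 * Sum the normalized load over all online CPUs.  Each CPU's running
 * average is consumed and reset here, so every read starts a fresh
 * averaging window.
 */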
static unsigned int report_load_at_max_freq(void)
{
	int cpu;
	struct cpu_load_data *pcpu;
	unsigned int total_load = 0;

	for_each_online_cpu(cpu) {
		pcpu = &per_cpu(cpuload, cpu);
		mutex_lock(&pcpu->cpu_load_mutex);
		update_average_load(pcpu->cur_freq, cpu);
		total_load += pcpu->avg_load_maxfreq;
		pcpu->avg_load_maxfreq = 0;
		mutex_unlock(&pcpu->cpu_load_mutex);
	}
	return total_load;
}

static int cpufreq_transition_handler(struct notifier_block *nb,
			unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;
	struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
	int j;

	switch (val) {
	case CPUFREQ_POSTCHANGE:
		for_each_cpu(j, this_cpu->related_cpus) {
			struct cpu_load_data *pcpu = &per_cpu(cpuload, j);

			mutex_lock(&pcpu->cpu_load_mutex);
			/* Close out the current window at the old frequency */
			update_average_load(freqs->old, freqs->cpu);
			pcpu->cur_freq = freqs->new;
			mutex_unlock(&pcpu->cpu_load_mutex);
		}
		break;
	}
	return 0;
}

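/*
 * When a CPU comes online, make sure it has a valid current frequency
 * and starts with a clean averaging window.
 */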
static int cpu_hotplug_handler(struct notifier_block *nb,
			unsigned long val, void *data)
{
	unsigned int cpu = (unsigned long)data;
	struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);

	switch (val) {
	case CPU_ONLINE:
		if (!this_cpu->cur_freq)
			this_cpu->cur_freq = acpuclk_get_rate(cpu);
		/* fall through */
	case CPU_ONLINE_FROZEN:
		this_cpu->avg_load_maxfreq = 0;
	}

	return NOTIFY_OK;
}

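/*
 * Park hotplug decisions across suspend/hibernate: the flag is set on
 * the *_PREPARE events, cleared once the transition completes, and
 * exposed to userspace through the hotplug_disable sysfs node below.
 */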
static int system_suspend_handler(struct notifier_block *nb,
				unsigned long val, void *data)
{
	switch (val) {
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		rq_info.hotplug_disabled = 0;
		break;
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		rq_info.hotplug_disabled = 1;
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static ssize_t hotplug_disable_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	unsigned int val = rq_info.hotplug_disabled;

	return snprintf(buf, MAX_LONG_SIZE, "%d\n", val);
}

static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);

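/*
 * Queued on the rq_stats workqueue: record, in milliseconds, how long
 * ago the deferrable timer was started and wake any userspace reader
 * polling on the def_timer_ms sysfs node.
 */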
static void def_work_fn(struct work_struct *work)
{
	int64_t diff;

	diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
	do_div(diff, 1000 * 1000);
	rq_info.def_interval = (unsigned int) diff;

	/* Notify polling threads on change of value */
	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
}

static ssize_t run_queue_avg_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	unsigned int val = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);
	/* rq avg is currently available only on one core */
	val = rq_info.rq_avg;
	rq_info.rq_avg = 0;
	spin_unlock_irqrestore(&rq_lock, flags);

	/* rq_avg is kept in tenths, e.g. 25 means 2.5 runnable tasks */
	return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
}

static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);

static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	int ret = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);
	ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
		       jiffies_to_msecs(rq_info.rq_poll_jiffies));
	spin_unlock_irqrestore(&rq_lock, flags);

	return ret;
}

static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	unsigned int val = 0;
	unsigned long flags = 0;
	static DEFINE_MUTEX(lock_poll_ms);

	mutex_lock(&lock_poll_ms);

	spin_lock_irqsave(&rq_lock, flags);
	/* Ignore input that does not parse as an unsigned integer */
	if (sscanf(buf, "%u", &val) == 1)
		rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
	spin_unlock_irqrestore(&rq_lock, flags);

	mutex_unlock(&lock_poll_ms);

	return count;
}

static struct kobj_attribute run_queue_poll_ms_attr =
	__ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
	       store_run_queue_poll_ms);

static ssize_t show_def_timer_ms(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, MAX_LONG_SIZE, "%u\n", rq_info.def_interval);
}

static ssize_t store_def_timer_ms(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	unsigned int val = 0;

	/* Ignore input that does not parse as an unsigned integer */
	if (sscanf(buf, "%u", &val) == 1)
		rq_info.def_timer_jiffies = msecs_to_jiffies(val);

	rq_info.def_start_time = ktime_to_ns(ktime_get());
	return count;
}

static struct kobj_attribute def_timer_ms_attr =
	__ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
	       store_def_timer_ms);

static ssize_t show_cpu_normalized_load(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
}

/* Read-only: there is no store method for this attribute */
static struct kobj_attribute cpu_normalized_load_attr =
	__ATTR(cpu_normalized_load, S_IRUSR, show_cpu_normalized_load,
	       NULL);

static struct attribute *rq_attrs[] = {
	&cpu_normalized_load_attr.attr,
	&def_timer_ms_attr.attr,
	&run_queue_avg_attr.attr,
	&run_queue_poll_ms_attr.attr,
	&hotplug_disabled_attr.attr,
	NULL,
};

static struct attribute_group rq_attr_group = {
	.attrs = rq_attrs,
};

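/*
 * The attribute group above is exposed under
 * /sys/devices/system/cpu/cpu0/rq-stats/ (created in init_rq_attribs()
 * below).  An illustrative shell session, assuming sysfs is mounted at
 * /sys:
 *
 *   cat /sys/devices/system/cpu/cpu0/rq-stats/run_queue_avg
 *   echo 10 > /sys/devices/system/cpu/cpu0/rq-stats/run_queue_poll_ms
 *   cat /sys/devices/system/cpu/cpu0/rq-stats/cpu_normalized_load
 */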
static int init_rq_attribs(void)
{
	int err;

	rq_info.rq_avg = 0;
	rq_info.attr_group = &rq_attr_group;

	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
	rq_info.kobj = kobject_create_and_add("rq-stats",
					      &get_cpu_device(0)->kobj);
	if (!rq_info.kobj)
		return -ENOMEM;

	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
	if (err)
		kobject_put(rq_info.kobj);
	else
		kobject_uevent(rq_info.kobj, KOBJ_ADD);

	return err;
}

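/*
 * Main initialization: polling state, per-CPU load bookkeeping, and
 * the cpufreq/hotplug notifiers.  Runs at late_initcall, by which
 * point cpufreq policies should be populated for cpufreq_get_policy().
 */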
static int __init msm_rq_stats_init(void)
{
	int ret;
	int i;
	struct cpufreq_policy cpu_policy;

	/* Bail out if this is not an SMP target */
	if (!is_smp()) {
		rq_info.init = 0;
		return -ENOSYS;
	}

	rq_wq = create_singlethread_workqueue("rq_stats");
	BUG_ON(!rq_wq);
	INIT_WORK(&rq_info.def_timer_work, def_work_fn);
	spin_lock_init(&rq_lock);
	rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
	rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
	rq_info.rq_poll_last_jiffy = 0;
	rq_info.def_timer_last_jiffy = 0;
	rq_info.hotplug_disabled = 0;
	ret = init_rq_attribs();

	rq_info.init = 1;

	for_each_possible_cpu(i) {
		struct cpu_load_data *pcpu = &per_cpu(cpuload, i);

		mutex_init(&pcpu->cpu_load_mutex);
		cpufreq_get_policy(&cpu_policy, i);
		pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
		if (cpu_online(i))
			pcpu->cur_freq = acpuclk_get_rate(i);
		cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
	}
	freq_transition.notifier_call = cpufreq_transition_handler;
	cpu_hotplug.notifier_call = cpu_hotplug_handler;
	cpufreq_register_notifier(&freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);
	register_hotcpu_notifier(&cpu_hotplug);

	return ret;
}
late_initcall(msm_rq_stats_init);

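/*
 * The suspend notifier is registered separately at core_initcall so
 * that hotplug_disabled is managed even before the main init runs.
 */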
static int __init msm_rq_stats_early_init(void)
{
	/* Bail out if this is not an SMP target */
	if (!is_smp()) {
		rq_info.init = 0;
		return -ENOSYS;
	}

	pm_notifier(system_suspend_handler, 0);
	return 0;
}
core_initcall(msm_rq_stats_early_init);