/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * Qualcomm MSM Runqueue Stats and cpu utilization Interface for Userspace
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/cpu.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/rq_stats.h>
#include <linux/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/tick.h>
#include <linux/suspend.h>
#include <asm/smp_plat.h>
#include "acpuclock.h"

#define MAX_LONG_SIZE 24
#define DEFAULT_RQ_POLL_JIFFIES 1
#define DEFAULT_DEF_TIMER_JIFFIES 5

struct notifier_block freq_transition;
struct notifier_block cpu_hotplug;

struct cpu_load_data {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_iowait;
	unsigned int avg_load_maxfreq;
	unsigned int samples;
	unsigned int window_size;
	unsigned int cur_freq;
	unsigned int policy_max;
	cpumask_var_t related_cpus;
	struct mutex cpu_load_mutex;
};

static DEFINE_PER_CPU(struct cpu_load_data, cpuload);

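/*
 * Derive idle time for a CPU from the cputime accounting stats when a
 * fine-grained idle counter is not available: anything that is not
 * user/system/irq/softirq/steal/nice time is treated as idle.
 */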
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu,
						cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

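/*
 * Fold the load observed since the last sample into a running average
 * for this CPU. The load is first normalized to the maximum frequency,
 *
 *	load_at_max_freq = cur_load * freq / policy_max,
 *
 * then combined with the previous average, weighted by the length of
 * each sampling window:
 *
 *	avg = (avg * window_size + load_at_max_freq * wall_time) /
 *	      (window_size + wall_time)
 */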
static int update_average_load(unsigned int freq, unsigned int cpu)
{
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
	unsigned int idle_time, wall_time, iowait_time;
	unsigned int cur_load, load_at_max_freq;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
	cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
	pcpu->prev_cpu_iowait = cur_iowait_time;

	/* Time spent waiting on I/O counts as busy, not idle */
	if (idle_time >= iowait_time)
		idle_time -= iowait_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	cur_load = 100 * (wall_time - idle_time) / wall_time;

	/* Calculate the load scaled to the CPU's maximum frequency */
	load_at_max_freq = (cur_load * freq) / pcpu->policy_max;

	if (!pcpu->avg_load_maxfreq) {
		/* This is the first sample in this window */
		pcpu->avg_load_maxfreq = load_at_max_freq;
		pcpu->window_size = wall_time;
	} else {
		/*
		 * There is already a sample available in this window.
		 * Compute the weighted average with the previous entry,
		 * so that we get the precise weighted load.
		 */
		pcpu->avg_load_maxfreq =
			((pcpu->avg_load_maxfreq * pcpu->window_size) +
			(load_at_max_freq * wall_time)) /
			(wall_time + pcpu->window_size);

		pcpu->window_size += wall_time;
	}

	return 0;
}

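/*
 * Sum the normalized load across all online CPUs and reset each CPU's
 * accumulator, so every read of cpu_normalized_load starts a fresh
 * averaging window.
 */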
static unsigned int report_load_at_max_freq(void)
{
	int cpu;
	struct cpu_load_data *pcpu;
	unsigned int total_load = 0;

	for_each_online_cpu(cpu) {
		pcpu = &per_cpu(cpuload, cpu);
		mutex_lock(&pcpu->cpu_load_mutex);
		update_average_load(pcpu->cur_freq, cpu);
		total_load += pcpu->avg_load_maxfreq;
		pcpu->avg_load_maxfreq = 0;
		mutex_unlock(&pcpu->cpu_load_mutex);
	}
	return total_load;
}

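/*
 * On a frequency change, close out the sampling window at the old
 * frequency and record the new frequency for every CPU in the policy.
 */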
static int cpufreq_transition_handler(struct notifier_block *nb,
			unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;
	struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
	int j;

	switch (val) {
	case CPUFREQ_POSTCHANGE:
		for_each_cpu(j, this_cpu->related_cpus) {
			struct cpu_load_data *pcpu = &per_cpu(cpuload, j);

			mutex_lock(&pcpu->cpu_load_mutex);
			update_average_load(freqs->old, freqs->cpu);
			pcpu->cur_freq = freqs->new;
			mutex_unlock(&pcpu->cpu_load_mutex);
		}
		break;
	}
	return 0;
}

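/*
 * When a CPU comes online, pick up its current clock rate if we do not
 * have one yet, and restart its averaging window.
 */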
static int cpu_hotplug_handler(struct notifier_block *nb,
			unsigned long val, void *data)
{
	unsigned int cpu = (unsigned long)data;
	struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);

	switch (val) {
	case CPU_ONLINE:
		if (!this_cpu->cur_freq)
			this_cpu->cur_freq = acpuclk_get_rate(cpu);
		/* fall through */
	case CPU_ONLINE_FROZEN:
		this_cpu->avg_load_maxfreq = 0;
	}

	return NOTIFY_OK;
}

static int system_suspend_handler(struct notifier_block *nb,
				unsigned long val, void *data)
{
	switch (val) {
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		rq_info.hotplug_disabled = 0;
		break;
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		rq_info.hotplug_disabled = 1;
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static ssize_t hotplug_disable_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	unsigned int val = rq_info.hotplug_disabled;

	return snprintf(buf, MAX_LONG_SIZE, "%d\n", val);
}

static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);

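/*
 * Deferred-timer work: report how many milliseconds have elapsed since
 * the deferred timer was (re)armed and wake up pollers of def_timer_ms.
 */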
static void def_work_fn(struct work_struct *work)
{
	int64_t diff;

	diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
	do_div(diff, 1000 * 1000);
	rq_info.def_interval = (unsigned int) diff;

	/* Notify polling threads on change of value */
	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
}

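/* Report the run-queue average (accumulated in tenths) and reset it */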
static ssize_t run_queue_avg_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	unsigned int val = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);
	/* rq avg currently available only on one core */
	val = rq_info.rq_avg;
	rq_info.rq_avg = 0;
	spin_unlock_irqrestore(&rq_lock, flags);

	return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
}

static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);

static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	int ret = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);
	ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
		       jiffies_to_msecs(rq_info.rq_poll_jiffies));
	spin_unlock_irqrestore(&rq_lock, flags);

	return ret;
}

static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
			struct kobj_attribute *attr,
			const char *buf, size_t count)
{
	unsigned int val = 0;
	unsigned long flags = 0;
	static DEFINE_MUTEX(lock_poll_ms);

	/* Parse outside the spinlock and reject malformed input */
	if (sscanf(buf, "%u", &val) != 1)
		return -EINVAL;

	mutex_lock(&lock_poll_ms);

	spin_lock_irqsave(&rq_lock, flags);
	rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
	spin_unlock_irqrestore(&rq_lock, flags);

	mutex_unlock(&lock_poll_ms);

	return count;
}

static struct kobj_attribute run_queue_poll_ms_attr =
	__ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
	       store_run_queue_poll_ms);

static ssize_t show_def_timer_ms(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, MAX_LONG_SIZE, "%u\n", rq_info.def_interval);
}

static ssize_t store_def_timer_ms(struct kobject *kobj,
			struct kobj_attribute *attr, const char *buf,
			size_t count)
{
	unsigned int val = 0;

	if (sscanf(buf, "%u", &val) != 1)
		return -EINVAL;

	rq_info.def_timer_jiffies = msecs_to_jiffies(val);

	rq_info.def_start_time = ktime_to_ns(ktime_get());
	return count;
}

static struct kobj_attribute def_timer_ms_attr =
	__ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
	       store_def_timer_ms);

static ssize_t show_cpu_normalized_load(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
}

static struct kobj_attribute cpu_normalized_load_attr =
	__ATTR(cpu_normalized_load, S_IWUSR | S_IRUSR, show_cpu_normalized_load,
	       NULL);

static struct attribute *rq_attrs[] = {
	&cpu_normalized_load_attr.attr,
	&def_timer_ms_attr.attr,
	&run_queue_avg_attr.attr,
	&run_queue_poll_ms_attr.attr,
	&hotplug_disabled_attr.attr,
	NULL,
};

static struct attribute_group rq_attr_group = {
	.attrs = rq_attrs,
};

static int init_rq_attribs(void)
{
	int err;

	rq_info.rq_avg = 0;
	rq_info.attr_group = &rq_attr_group;

	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
	rq_info.kobj = kobject_create_and_add("rq-stats",
					      &get_cpu_device(0)->kobj);
	if (!rq_info.kobj)
		return -ENOMEM;

	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
	if (err)
		kobject_put(rq_info.kobj);
	else
		kobject_uevent(rq_info.kobj, KOBJ_ADD);

	return err;
}

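/*
 * Set up the rq-stats sysfs interface, per-CPU load bookkeeping and the
 * cpufreq/hotplug notifiers. Runs as a late_initcall so that cpufreq
 * policies and per-CPU devices are already in place.
 */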
static int __init msm_rq_stats_init(void)
{
	int ret;
	int i;
	struct cpufreq_policy cpu_policy;

	/* Bail out if this is not an SMP target */
	if (!is_smp()) {
		rq_info.init = 0;
		return -ENOSYS;
	}

	rq_wq = create_singlethread_workqueue("rq_stats");
	BUG_ON(!rq_wq);
	INIT_WORK(&rq_info.def_timer_work, def_work_fn);
	spin_lock_init(&rq_lock);
	rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
	rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
	rq_info.rq_poll_last_jiffy = 0;
	rq_info.def_timer_last_jiffy = 0;
	rq_info.hotplug_disabled = 0;
	ret = init_rq_attribs();

	rq_info.init = 1;

	for_each_possible_cpu(i) {
		struct cpu_load_data *pcpu = &per_cpu(cpuload, i);

		mutex_init(&pcpu->cpu_load_mutex);
		cpufreq_get_policy(&cpu_policy, i);
		pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
		if (cpu_online(i))
			pcpu->cur_freq = acpuclk_get_rate(i);
		cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
	}
	freq_transition.notifier_call = cpufreq_transition_handler;
	cpu_hotplug.notifier_call = cpu_hotplug_handler;
	cpufreq_register_notifier(&freq_transition,
				  CPUFREQ_TRANSITION_NOTIFIER);
	register_hotcpu_notifier(&cpu_hotplug);

	return ret;
}
late_initcall(msm_rq_stats_init);

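/* Register for PM events ahead of the main init (core_initcall) */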
static int __init msm_rq_stats_early_init(void)
{
	/* Bail out if this is not an SMP target */
	if (!is_smp()) {
		rq_info.init = 0;
		return -ENOSYS;
	}

	pm_notifier(system_suspend_handler, 0);
	return 0;
}
core_initcall(msm_rq_stats_early_init);