/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
/*
 * Qualcomm MSM Runqueue Stats and CPU Utilization Interface for Userspace
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/cpu.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/rq_stats.h>
#include <linux/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/tick.h>
#include <asm/smp_plat.h>

#define MAX_LONG_SIZE 24
#define DEFAULT_RQ_POLL_JIFFIES 1
#define DEFAULT_DEF_TIMER_JIFFIES 5

static struct notifier_block freq_transition;
static struct notifier_block cpu_hotplug;

struct cpu_load_data {
	cputime64_t prev_cpu_idle;	/* idle time at the last sample */
	cputime64_t prev_cpu_wall;	/* wall time at the last sample */
	cputime64_t prev_cpu_iowait;	/* iowait time at the last sample */
	unsigned int avg_load_maxfreq;	/* load normalized to policy_max */
	unsigned int samples;
	unsigned int window_size;	/* wall time covered by the average */
	unsigned int cur_freq;		/* current operating frequency */
	unsigned int policy_max;	/* max frequency of the cpufreq policy */
	cpumask_var_t related_cpus;	/* CPUs sharing this frequency domain */
	struct mutex cpu_load_mutex;	/* protects the fields above */
};

static DEFINE_PER_CPU(struct cpu_load_data, cpuload);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		/* NOHZ idle stats unavailable: fall back to jiffy counting */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu,
					      cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

static int update_average_load(unsigned int freq, unsigned int cpu)
{
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
	unsigned int idle_time, wall_time, iowait_time;
	unsigned int cur_load, load_at_max_freq;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
	cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
	pcpu->prev_cpu_iowait = cur_iowait_time;

	/* Treat time spent waiting on I/O as busy time */
	if (idle_time >= iowait_time)
		idle_time -= iowait_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	cur_load = 100 * (wall_time - idle_time) / wall_time;

	/* Scale the load to what it would be at the policy's max frequency */
	load_at_max_freq = (cur_load * freq) / pcpu->policy_max;

	if (!pcpu->avg_load_maxfreq) {
		/* This is the first sample in this window */
		pcpu->avg_load_maxfreq = load_at_max_freq;
		pcpu->window_size = wall_time;
	} else {
		/*
		 * There is already a sample available in this window.
		 * Compute a weighted average with the previous entry, so
		 * that we get the precise weighted load.
		 */
		pcpu->avg_load_maxfreq =
			((pcpu->avg_load_maxfreq * pcpu->window_size) +
			(load_at_max_freq * wall_time)) /
			(wall_time + pcpu->window_size);

		pcpu->window_size += wall_time;
	}

	return 0;
}

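/*
 * Worked example of the math above (illustrative numbers only): a sample
 * window of wall_time = 100 with idle_time = 40 gives cur_load = 60.  If
 * the CPU ran at freq = 600 MHz against a policy_max of 1200 MHz, the load
 * normalized to the max frequency is (60 * 600) / 1200 = 30.  A second
 * sample of 30 over a 50-tick window then folds in as
 * ((30 * 100) + (30 * 50)) / (50 + 100) = 30, and window_size grows to 150.
 */
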
static unsigned int report_load_at_max_freq(void)
{
	int cpu;
	struct cpu_load_data *pcpu;
	unsigned int total_load = 0;

	for_each_online_cpu(cpu) {
		pcpu = &per_cpu(cpuload, cpu);
		mutex_lock(&pcpu->cpu_load_mutex);
		update_average_load(pcpu->cur_freq, cpu);
		total_load += pcpu->avg_load_maxfreq;
		pcpu->avg_load_maxfreq = 0;
		mutex_unlock(&pcpu->cpu_load_mutex);
	}
	return total_load;
}

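/*
 * Note that the reported value is the sum over online CPUs and each read
 * resets the per-CPU averages, so consecutive reads cover disjoint windows.
 * For example (illustrative), two online CPUs each averaging a normalized
 * load of 45 produce a reading of 90.
 */
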
static int cpufreq_transition_handler(struct notifier_block *nb,
			unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;
	struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
	int j;

	switch (val) {
	case CPUFREQ_POSTCHANGE:
		for_each_cpu(j, this_cpu->related_cpus) {
			struct cpu_load_data *pcpu = &per_cpu(cpuload, j);

			mutex_lock(&pcpu->cpu_load_mutex);
			/* Fold time at the old frequency into this CPU's average */
			update_average_load(freqs->old, j);
			pcpu->cur_freq = freqs->new;
			mutex_unlock(&pcpu->cpu_load_mutex);
		}
		break;
	}
	return 0;
}

static int cpu_hotplug_handler(struct notifier_block *nb,
			unsigned long val, void *data)
{
	unsigned int cpu = (unsigned long)data;
	struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);

	switch (val) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* Start a fresh sampling window when a CPU comes online */
		this_cpu->avg_load_maxfreq = 0;
	}

	return NOTIFY_OK;
}

static void def_work_fn(struct work_struct *work)
{
	int64_t diff;

	diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
	do_div(diff, 1000 * 1000);
	rq_info.def_interval = (unsigned int) diff;

	/* Notify polling threads on change of value */
	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
}

static ssize_t run_queue_avg_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	unsigned int val = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);
	/* rq avg currently available only on one core */
	val = rq_info.rq_avg;
	rq_info.rq_avg = 0;
	spin_unlock_irqrestore(&rq_lock, flags);

	/* rq_avg is kept in tenths of a task; report it as a decimal */
	return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
}

static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);

static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	int ret = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);
	ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
		       jiffies_to_msecs(rq_info.rq_poll_jiffies));
	spin_unlock_irqrestore(&rq_lock, flags);

	return ret;
}

static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	unsigned int val = 0;
	unsigned long flags = 0;
	static DEFINE_MUTEX(lock_poll_ms);

	mutex_lock(&lock_poll_ms);

	spin_lock_irqsave(&rq_lock, flags);
	sscanf(buf, "%u", &val);
	rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
	spin_unlock_irqrestore(&rq_lock, flags);

	mutex_unlock(&lock_poll_ms);

	return count;
}

static struct kobj_attribute run_queue_poll_ms_attr =
	__ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
	       store_run_queue_poll_ms);

static ssize_t show_def_timer_ms(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, MAX_LONG_SIZE, "%u\n", rq_info.def_interval);
}

static ssize_t store_def_timer_ms(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	unsigned int val = 0;

	sscanf(buf, "%u", &val);
	rq_info.def_timer_jiffies = msecs_to_jiffies(val);

	rq_info.def_start_time = ktime_to_ns(ktime_get());
	return count;
}

static struct kobj_attribute def_timer_ms_attr =
	__ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
	       store_def_timer_ms);

static ssize_t show_cpu_normalized_load(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
}

/* Read-only: there is no store method for this attribute */
static struct kobj_attribute cpu_normalized_load_attr =
	__ATTR(cpu_normalized_load, S_IRUSR, show_cpu_normalized_load,
	       NULL);

static struct attribute *rq_attrs[] = {
	&cpu_normalized_load_attr.attr,
	&def_timer_ms_attr.attr,
	&run_queue_avg_attr.attr,
	&run_queue_poll_ms_attr.attr,
	NULL,
};

static struct attribute_group rq_attr_group = {
	.attrs = rq_attrs,
};

static int init_rq_attribs(void)
{
	int err;

	rq_info.rq_avg = 0;
	rq_info.attr_group = &rq_attr_group;

	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
	rq_info.kobj = kobject_create_and_add("rq-stats",
					      &get_cpu_device(0)->kobj);
	if (!rq_info.kobj)
		return -ENOMEM;

	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
	if (err)
		kobject_put(rq_info.kobj);
	else
		kobject_uevent(rq_info.kobj, KOBJ_ADD);

	return err;
}

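/*
 * Example userspace session (illustrative; the values depend entirely on
 * system load):
 *
 *   # cat /sys/devices/system/cpu/cpu0/rq-stats/run_queue_avg
 *   1.2
 *   # echo 10 > /sys/devices/system/cpu/cpu0/rq-stats/run_queue_poll_ms
 *   # cat /sys/devices/system/cpu/cpu0/rq-stats/cpu_normalized_load
 *   87
 *
 * Pollers typically block on def_timer_ms and reread it when
 * sysfs_notify() fires from def_work_fn().
 */
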
static int __init msm_rq_stats_init(void)
{
	int ret;
	int i;
	struct cpufreq_policy cpu_policy;

	/* Bail out if this is not an SMP target */
	if (!is_smp()) {
		rq_info.init = 0;
		return -ENOSYS;
	}

	rq_wq = create_singlethread_workqueue("rq_stats");
	BUG_ON(!rq_wq);
	INIT_WORK(&rq_info.def_timer_work, def_work_fn);
	spin_lock_init(&rq_lock);
	rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
	rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
	rq_info.rq_poll_last_jiffy = 0;
	rq_info.def_timer_last_jiffy = 0;
	ret = init_rq_attribs();

	rq_info.init = 1;

	for_each_possible_cpu(i) {
		struct cpu_load_data *pcpu = &per_cpu(cpuload, i);

		mutex_init(&pcpu->cpu_load_mutex);
		cpufreq_get_policy(&cpu_policy, i);
		pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
		cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
	}
	freq_transition.notifier_call = cpufreq_transition_handler;
	cpu_hotplug.notifier_call = cpu_hotplug_handler;
	cpufreq_register_notifier(&freq_transition,
				  CPUFREQ_TRANSITION_NOTIFIER);
	register_hotcpu_notifier(&cpu_hotplug);

	return ret;
}
late_initcall(msm_rq_stats_init);