/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * Qualcomm MSM Runqueue Stats Interface for Userspace
15 */
16
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/hrtimer.h>
21#include <linux/cpu.h>
22#include <linux/kobject.h>
23#include <linux/sysfs.h>
24#include <linux/notifier.h>
25#include <linux/slab.h>
26#include <linux/workqueue.h>
27#include <linux/sched.h>
28#include <linux/spinlock.h>
29
/*
 * All state for the rq-stats sysfs interface.  rq_avg, last_time and
 * total_time are protected by rq_lock; the def_timer_* fields are only
 * touched from the sysfs store handler and the deferrable-timer worker.
 */
struct rq_data {
	unsigned int rq_avg;		/* time-weighted avg of nr_running(), scaled x10 */
	unsigned int rq_poll_ms;	/* poll period in ms; 0 = polling disabled */
	unsigned int def_timer_ms;	/* requested deferrable-timer period in ms */
	unsigned int def_interval;	/* measured ms since def timer was armed */
	int64_t last_time;		/* ns timestamp of the previous poll */
	int64_t total_time;		/* ms accumulated in the current avg window */
	int64_t def_start_time;		/* ns timestamp when the def timer was armed */
	struct delayed_work rq_work;		/* periodic rq-average poll work */
	struct attribute_group *attr_group;
	struct kobject *kobj;		/* /sys/devices/system/cpu/cpu0/rq-stats */
	struct delayed_work def_timer_work;	/* one-shot deferrable timer work */
};

static struct rq_data rq_info;
/* Serializes rq_avg/last_time/total_time between the worker and sysfs reads. */
static DEFINE_SPINLOCK(rq_lock);
static struct workqueue_struct *rq_wq;
47
/*
 * Periodic poll worker: maintains a time-weighted running average of
 * nr_running() (scaled by 10 so one decimal digit survives the integer
 * math) and re-arms itself every rq_poll_ms milliseconds.
 */
static void rq_work_fn(struct work_struct *work)
{
	int64_t time_diff = 0;
	int64_t rq_avg = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);

	/* First run (or after a reset): open the averaging window now. */
	if (!rq_info.last_time)
		rq_info.last_time = ktime_to_ns(ktime_get());
	/* The sysfs reader zeroes rq_avg; restart the window when it does. */
	if (!rq_info.rq_avg)
		rq_info.total_time = 0;

	rq_avg = nr_running() * 10;	/* sample in tenths of a task */
	time_diff = ktime_to_ns(ktime_get()) - rq_info.last_time;
	do_div(time_diff, (1000 * 1000));	/* ns -> ms */

	/* Blend the new sample into the running average, weighted by time. */
	if (time_diff && rq_info.total_time) {
		rq_avg = (rq_avg * time_diff) +
			(rq_info.rq_avg * rq_info.total_time);
		do_div(rq_avg, rq_info.total_time + time_diff);
	}

	rq_info.rq_avg = (unsigned int)rq_avg;

	/* Set the next poll; rq_poll_ms == 0 means polling is disabled. */
	if (rq_info.rq_poll_ms)
		queue_delayed_work(rq_wq, &rq_info.rq_work,
			msecs_to_jiffies(rq_info.rq_poll_ms));

	rq_info.total_time += time_diff;
	rq_info.last_time = ktime_to_ns(ktime_get());

	spin_unlock_irqrestore(&rq_lock, flags);
}
83
84static void def_work_fn(struct work_struct *work)
85{
86 int64_t diff;
87
88 diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
89 do_div(diff, 1000 * 1000);
90 rq_info.def_interval = (unsigned int) diff;
91
92 /* Notify polling threads on change of value */
93 sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
94}
95
96static ssize_t show_run_queue_avg(struct kobject *kobj,
97 struct kobj_attribute *attr, char *buf)
98{
99 unsigned int val = 0;
100 unsigned long flags = 0;
101
102 spin_lock_irqsave(&rq_lock, flags);
103 /* rq avg currently available only on one core */
104 val = rq_info.rq_avg;
105 rq_info.rq_avg = 0;
106 spin_unlock_irqrestore(&rq_lock, flags);
107
108 return sprintf(buf, "%d.%d\n", val/10, val%10);
109}
110
111static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
112 struct kobj_attribute *attr, char *buf)
113{
114 int ret = 0;
115 unsigned long flags = 0;
116
117 spin_lock_irqsave(&rq_lock, flags);
118 ret = sprintf(buf, "%u\n", rq_info.rq_poll_ms);
119 spin_unlock_irqrestore(&rq_lock, flags);
120
121 return ret;
122}
123
124static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
125 struct kobj_attribute *attr, const char *buf, size_t count)
126{
127 int val = 0;
128 unsigned long flags = 0;
129 static DEFINE_MUTEX(lock_poll_ms);
130
131 mutex_lock(&lock_poll_ms);
132
133 spin_lock_irqsave(&rq_lock, flags);
134 sscanf(buf, "%u", &val);
135 rq_info.rq_poll_ms = val;
136 spin_unlock_irqrestore(&rq_lock, flags);
137
138 if (val <= 0)
139 cancel_delayed_work(&rq_info.rq_work);
140 else
141 queue_delayed_work(rq_wq, &rq_info.rq_work,
142 msecs_to_jiffies(val));
143
144 mutex_unlock(&lock_poll_ms);
145
146 return count;
147}
148
149static ssize_t show_def_timer_ms(struct kobject *kobj,
150 struct kobj_attribute *attr, char *buf)
151{
152 return sprintf(buf, "%u\n", rq_info.def_interval);
153}
154
155static ssize_t store_def_timer_ms(struct kobject *kobj,
156 struct kobj_attribute *attr, const char *buf, size_t count)
157{
158 unsigned int val = 0;
159
160 sscanf(buf, "%u", &val);
161 rq_info.def_timer_ms = val;
162
163 if (val <= 0)
164 cancel_delayed_work(&rq_info.def_timer_work);
165 else {
166 rq_info.def_start_time = ktime_to_ns(ktime_get());
167 queue_delayed_work(rq_wq, &rq_info.def_timer_work,
168 msecs_to_jiffies(val));
169 }
170
171 return count;
172}
173
/*
 * Allocate a read-only (0444) kobj_attribute wired to show_<att>.
 * Statement-expression macro: evaluates to the embedded struct attribute
 * pointer, or NULL on allocation failure — caller must check.  The
 * allocation is freed by init_rq_attribs()'s error path.
 */
#define MSM_RQ_STATS_RO_ATTRIB(att) ({ \
		struct attribute *attrib = NULL; \
		struct kobj_attribute *ptr = NULL; \
		ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
		if (ptr) { \
			ptr->attr.name = #att; \
			ptr->attr.mode = S_IRUGO; \
			ptr->show = show_##att; \
			ptr->store = NULL; \
			attrib = &ptr->attr; \
		} \
		attrib; })
186
/*
 * Allocate an owner read/write (0600) kobj_attribute wired to
 * show_<att>/store_<att>.  Same contract as MSM_RQ_STATS_RO_ATTRIB:
 * evaluates to the attribute pointer or NULL on allocation failure.
 */
#define MSM_RQ_STATS_RW_ATTRIB(att) ({ \
		struct attribute *attrib = NULL; \
		struct kobj_attribute *ptr = NULL; \
		ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
		if (ptr) { \
			ptr->attr.name = #att; \
			ptr->attr.mode = S_IWUSR|S_IRUSR; \
			ptr->show = show_##att; \
			ptr->store = store_##att; \
			attrib = &ptr->attr; \
		} \
		attrib; })
199
200static int init_rq_attribs(void)
201{
202 int i;
203 int err = 0;
204 const int attr_count = 4;
205
206 struct attribute **attribs =
207 kzalloc(sizeof(struct attribute *) * attr_count, GFP_KERNEL);
208
209 if (!attribs)
210 goto rel;
211
212 rq_info.rq_avg = 0;
213 rq_info.rq_poll_ms = 0;
214
215 attribs[0] = MSM_RQ_STATS_RW_ATTRIB(def_timer_ms);
216 attribs[1] = MSM_RQ_STATS_RO_ATTRIB(run_queue_avg);
217 attribs[2] = MSM_RQ_STATS_RW_ATTRIB(run_queue_poll_ms);
218 attribs[3] = NULL;
219
220 for (i = 0; i < attr_count - 1 ; i++) {
221 if (!attribs[i])
222 goto rel;
223 }
224
225 rq_info.attr_group = kzalloc(sizeof(struct attribute_group),
226 GFP_KERNEL);
227 if (!rq_info.attr_group)
228 goto rel;
229 rq_info.attr_group->attrs = attribs;
230
231 /* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
232 rq_info.kobj = kobject_create_and_add("rq-stats",
233 &get_cpu_sysdev(0)->kobj);
234 if (!rq_info.kobj)
235 goto rel;
236
237 err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
238 if (err)
239 kobject_put(rq_info.kobj);
240 else
241 kobject_uevent(rq_info.kobj, KOBJ_ADD);
242
243 if (!err)
244 return err;
245
246rel:
247 for (i = 0; i < attr_count - 1 ; i++)
248 kfree(attribs[i]);
249 kfree(attribs);
250 kfree(rq_info.attr_group);
251 kfree(rq_info.kobj);
252
253 return -ENOMEM;
254}
255
256static int __init msm_rq_stats_init(void)
257{
258 rq_wq = create_singlethread_workqueue("rq_stats");
259 BUG_ON(!rq_wq);
260 INIT_DELAYED_WORK_DEFERRABLE(&rq_info.rq_work, rq_work_fn);
261 INIT_DELAYED_WORK_DEFERRABLE(&rq_info.def_timer_work, def_work_fn);
262 return init_rq_attribs();
263}
264late_initcall(msm_rq_stats_init);