/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/idle_stats_device.h>
#include <linux/module.h>

DEFINE_MUTEX(device_list_lock);
LIST_HEAD(device_list);

static ktime_t us_to_ktime(__u32 us)
{
	return ns_to_ktime((u64)us * NSEC_PER_USEC);
}

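/*
 * Look up a registered device by its misc minor number; returns NULL if
 * no device on the global list matches.
 */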
static struct msm_idle_stats_device *_device_from_minor(unsigned int minor)
{
	struct msm_idle_stats_device *device, *ret = NULL;

	mutex_lock(&device_list_lock);
	list_for_each_entry(device, &device_list, list) {
		if (minor == device->miscdev.minor) {
			ret = device;
			break;
		}
	}
	mutex_unlock(&device_list_lock);
	return ret;
}

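/*
 * Record event bits in the active stats buffer and wake any poll()ers,
 * but only if no other event was already pending.
 */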
void msm_idle_stats_update_event(struct msm_idle_stats_device *device,
	__u32 event)
{
	__u32 wake_up = !device->stats->event;

	device->stats->event |= event;
	if (wake_up)
		wake_up_interruptible(&device->wait);
}
EXPORT_SYMBOL(msm_idle_stats_update_event);

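/*
 * hrtimer callback for the busy timer: flag the expiry so userspace is
 * notified, and do not restart the timer.
 */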
static enum hrtimer_restart msm_idle_stats_busy_timer(struct hrtimer *timer)
{
	struct msm_idle_stats_device *device =
		container_of(timer, struct msm_idle_stats_device, busy_timer);

	/* This is the only case where the event is modified without holding
	 * the device lock. However, since the timer is cancelled in the other
	 * cases, we are assured exclusive access to the event here.
	 */
	hrtimer_set_expires(&device->busy_timer, us_to_ktime(0));
	msm_idle_stats_update_event(device,
		MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED);
	return HRTIMER_NORESTART;
}

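/*
 * (Re)arm the busy timer with a relative timeout, unless a timer-expired
 * or collection-full event is already pending or the timeout is zero.
 */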
static void start_busy_timer(struct msm_idle_stats_device *device,
	ktime_t relative_time)
{
	hrtimer_cancel(&device->busy_timer);
	hrtimer_set_expires(&device->busy_timer, us_to_ktime(0));
	if (!((device->stats->event &
	    MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED) ||
	    (device->stats->event & MSM_IDLE_STATS_EVENT_COLLECTION_FULL))) {
		if (ktime_to_us(relative_time) > 0) {
			hrtimer_start(&device->busy_timer,
				relative_time,
				HRTIMER_MODE_REL);
		}
	}
}

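/* The device is readable whenever an event bit is pending. */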
static unsigned int msm_idle_stats_device_poll(struct file *file,
	poll_table *wait)
{
	struct msm_idle_stats_device *device = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &device->wait, wait);
	if (device->stats->event)
		mask = POLLIN | POLLRDNORM;
	return mask;
}

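/*
 * Append one idle pulse to the active buffer, restarting from the first
 * slot (with a warning) once the hard limit is exceeded, and raise the
 * collection-full / nearly-full events at max_samples and 3/4 of it.
 */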
static void msm_idle_stats_add_sample(struct msm_idle_stats_device *device,
	struct msm_idle_pulse *pulse)
{
	hrtimer_cancel(&device->busy_timer);
	hrtimer_set_expires(&device->busy_timer, us_to_ktime(0));
	if (device->stats->nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS) {
		pr_warning("idle_stats_device: Overwriting samples\n");
		device->stats->nr_collected = 0;
	}
	device->stats->pulse_chain[device->stats->nr_collected] = *pulse;
	device->stats->nr_collected++;

	if (device->stats->nr_collected == device->max_samples) {
		msm_idle_stats_update_event(device,
			MSM_IDLE_STATS_EVENT_COLLECTION_FULL);
	} else if (device->stats->nr_collected ==
			((device->max_samples * 3) / 4)) {
		msm_idle_stats_update_event(device,
			MSM_IDLE_STATS_EVENT_COLLECTION_NEARLY_FULL);
	}
}

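/*
 * MSM_IDLE_STATS_IOC_READ_STATS: take a fresh sample, swap the double-
 * buffered stats so collection continues in the other buffer, then copy
 * only the populated part of the retired buffer to userspace.
 */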
static long ioctl_read_stats(struct msm_idle_stats_device *device,
	unsigned long arg)
{
	int remaining;
	int requested;
	struct msm_idle_pulse pulse;
	struct msm_idle_read_stats *stats;
	__s64 remaining_time =
		ktime_to_us(hrtimer_get_remaining(&device->busy_timer));

	device->get_sample(device, &pulse);
	spin_lock(&device->lock);
	hrtimer_cancel(&device->busy_timer);
	stats = device->stats;
	if (stats == &device->stats_vector[0])
		device->stats = &device->stats_vector[1];
	else
		device->stats = &device->stats_vector[0];
	device->stats->event = 0;
	device->stats->nr_collected = 0;
	spin_unlock(&device->lock);
	if (stats->nr_collected >= device->max_samples) {
		stats->nr_collected = device->max_samples;
	} else {
		stats->pulse_chain[stats->nr_collected] = pulse;
		stats->nr_collected++;
		if (stats->nr_collected == device->max_samples)
			stats->event |= MSM_IDLE_STATS_EVENT_COLLECTION_FULL;
		else if (stats->nr_collected ==
				((device->max_samples * 3) / 4))
			stats->event |=
				MSM_IDLE_STATS_EVENT_COLLECTION_NEARLY_FULL;
	}
	if (remaining_time < 0) {
		stats->busy_timer_remaining = 0;
	} else {
		stats->busy_timer_remaining = remaining_time;
		if ((__s64)stats->busy_timer_remaining != remaining_time)
			stats->busy_timer_remaining = -1;
	}
	stats->return_timestamp = ktime_to_us(ktime_get());
	requested =
		((sizeof(*stats) - sizeof(stats->pulse_chain)) +
		(sizeof(stats->pulse_chain[0]) * stats->nr_collected));
	remaining = copy_to_user((void __user *)arg, stats, requested);
	if (remaining > 0)
		return -EFAULT;

	return 0;
}

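/*
 * MSM_IDLE_STATS_IOC_WRITE_STATS: userspace programs the busy timers and,
 * optionally, a new sample limit. The busy timer is only started right
 * away when the device is not currently idle.
 */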
static long ioctl_write_stats(struct msm_idle_stats_device *device,
	unsigned long arg)
{
	struct msm_idle_write_stats stats;
	int remaining;
	int ret = 0;

	remaining = copy_from_user(&stats, (void __user *)arg, sizeof(stats));
	if (remaining > 0) {
		ret = -EFAULT;
	} else {
		spin_lock(&device->lock);
		device->busy_timer_interval =
			us_to_ktime(stats.next_busy_timer);
		if (ktime_to_us(device->idle_start) == 0)
			start_busy_timer(device, us_to_ktime(stats.busy_timer));
		if ((stats.max_samples > 0) &&
		    (stats.max_samples <= MSM_IDLE_STATS_NR_MAX_INTERVALS))
			device->max_samples = stats.max_samples;
		spin_unlock(&device->lock);
	}
	return ret;
}

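/* Cancel the busy timer in preparation for entering idle. */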
void msm_idle_stats_prepare_idle_start(struct msm_idle_stats_device *device)
{
	spin_lock(&device->lock);
	hrtimer_cancel(&device->busy_timer);
	spin_unlock(&device->lock);
}
EXPORT_SYMBOL(msm_idle_stats_prepare_idle_start);

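/* The idle attempt was abandoned: restart the busy timer if it was armed. */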
void msm_idle_stats_abort_idle_start(struct msm_idle_stats_device *device)
{
	spin_lock(&device->lock);
	if (ktime_to_us(hrtimer_get_expires(&device->busy_timer)) > 0)
		hrtimer_restart(&device->busy_timer);
	spin_unlock(&device->lock);
}
EXPORT_SYMBOL(msm_idle_stats_abort_idle_start);

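/*
 * Enter idle: stop the busy timer and remember how much of it remained so
 * it can be resumed when the device becomes busy again.
 */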
void msm_idle_stats_idle_start(struct msm_idle_stats_device *device)
{
	spin_lock(&device->lock);
	hrtimer_cancel(&device->busy_timer);
	device->idle_start = ktime_get();
	if (ktime_to_us(hrtimer_get_expires(&device->busy_timer)) > 0) {
		device->remaining_time =
			hrtimer_get_remaining(&device->busy_timer);
		if (ktime_to_us(device->remaining_time) <= 0)
			device->remaining_time = us_to_ktime(0);
	} else {
		device->remaining_time = us_to_ktime(0);
	}
	spin_unlock(&device->lock);
}
EXPORT_SYMBOL(msm_idle_stats_idle_start);

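/*
 * Leave idle: record the pulse, turn a pending busy-timer expiry into an
 * expired-reset event, or otherwise rearm the busy timer for the next
 * busy interval.
 */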
void msm_idle_stats_idle_end(struct msm_idle_stats_device *device,
	struct msm_idle_pulse *pulse)
{
	int tmp;
	u32 idle_time = 0;

	spin_lock(&device->lock);
	if (ktime_to_us(device->idle_start) != 0) {
		idle_time = ktime_to_us(ktime_get())
			- ktime_to_us(device->idle_start);
		device->idle_start = us_to_ktime(0);
		msm_idle_stats_add_sample(device, pulse);
		if (device->stats->event &
		    MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED) {
			device->stats->event &=
				~MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED;
			msm_idle_stats_update_event(device,
				MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED_RESET);
		} else if (ktime_to_us(device->busy_timer_interval) > 0) {
			ktime_t busy_timer = device->busy_timer_interval;
			/* If the work were fully serialized it would be
			 * 100% busy; treat at least 80% busy the same way.
			 */
			if ((pulse->wait_interval * 5 >= idle_time * 4) &&
			    (ktime_to_us(device->remaining_time) > 0) &&
			    (ktime_to_us(device->remaining_time) <
			     ktime_to_us(busy_timer)))
				busy_timer = device->remaining_time;
			start_busy_timer(device, busy_timer);
			/* If the previous busy interval extends past the
			 * current submit, intentionally raise a busy timer
			 * expired event.
			 */
			tmp = device->stats->nr_collected - 1;
			if (tmp > 0) {
				if ((device->stats->pulse_chain[tmp - 1].busy_start_time
				    + device->stats->pulse_chain[tmp - 1].busy_interval) >
				    device->stats->pulse_chain[tmp].busy_start_time)
					msm_idle_stats_update_event(device,
						MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED);
			}
		}
	}
	spin_unlock(&device->lock);
}
EXPORT_SYMBOL(msm_idle_stats_idle_end);

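/* Dispatch the read/write stats ioctls; any other command is -EINVAL. */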
static long msm_idle_stats_device_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct msm_idle_stats_device *device = file->private_data;
	int ret;

	switch (cmd) {
	case MSM_IDLE_STATS_IOC_READ_STATS:
		ret = ioctl_read_stats(device, arg);
		break;
	case MSM_IDLE_STATS_IOC_WRITE_STATS:
		ret = ioctl_write_stats(device, arg);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int msm_idle_stats_device_release(struct inode *inode,
	struct file *filep)
{
	return 0;
}

static int msm_idle_stats_device_open(struct inode *inode, struct file *filep)
{
	struct msm_idle_stats_device *device;

	device = _device_from_minor(iminor(inode));

	if (device == NULL)
		return -EPERM;

	filep->private_data = device;
	return 0;
}

static const struct file_operations msm_idle_stats_fops = {
	.open = msm_idle_stats_device_open,
	.release = msm_idle_stats_device_release,
	.unlocked_ioctl = msm_idle_stats_device_ioctl,
	.poll = msm_idle_stats_device_poll,
};

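/*
 * Initialize a stats device, add it to the global list and expose it to
 * userspace as a dynamic misc character device named after device->name.
 */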
int msm_idle_stats_register_device(struct msm_idle_stats_device *device)
{
	int ret = -ENOMEM;

	spin_lock_init(&device->lock);
	init_waitqueue_head(&device->wait);
	hrtimer_init(&device->busy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	device->busy_timer.function = msm_idle_stats_busy_timer;

	device->stats_vector[0].event = 0;
	device->stats_vector[0].nr_collected = 0;
	device->stats_vector[1].event = 0;
	device->stats_vector[1].nr_collected = 0;
	device->stats = &device->stats_vector[0];
	device->busy_timer_interval = us_to_ktime(0);
	device->max_samples = MSM_IDLE_STATS_NR_MAX_INTERVALS;

	mutex_lock(&device_list_lock);
	list_add(&device->list, &device_list);
	mutex_unlock(&device_list_lock);

	device->miscdev.minor = MISC_DYNAMIC_MINOR;
	device->miscdev.name = device->name;
	device->miscdev.fops = &msm_idle_stats_fops;

	ret = misc_register(&device->miscdev);
	if (ret)
		goto err_list;

	return ret;

err_list:
	mutex_lock(&device_list_lock);
	list_del(&device->list);
	mutex_unlock(&device_list_lock);
	return ret;
}
EXPORT_SYMBOL(msm_idle_stats_register_device);

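/*
 * Tear down a stats device: cancel its timer, unlink it from the global
 * list and deregister its misc device node.
 */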
int msm_idle_stats_deregister_device(struct msm_idle_stats_device *device)
{
	if (device == NULL)
		return 0;

	mutex_lock(&device_list_lock);
	spin_lock(&device->lock);
	hrtimer_cancel(&device->busy_timer);
	list_del(&device->list);
	spin_unlock(&device->lock);
	mutex_unlock(&device_list_lock);

	return misc_deregister(&device->miscdev);
}
EXPORT_SYMBOL(msm_idle_stats_deregister_device);