/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

#include "idle_stats.h"
#include <mach/cpuidle.h>

/******************************************************************************
 * Debug Definitions
 *****************************************************************************/

enum {
	MSM_IDLE_STATS_DEBUG_API = BIT(0),
	MSM_IDLE_STATS_DEBUG_SIGNAL = BIT(1),
	MSM_IDLE_STATS_DEBUG_MIGRATION = BIT(2),
};

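/*
 * Debug output is off by default; it can be enabled at runtime by
 * writing a mask of the BIT() flags above to the writable debug_mask
 * module parameter under /sys/module/.
 */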
static int msm_idle_stats_debug_mask;
module_param_named(
	debug_mask, msm_idle_stats_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

/******************************************************************************
 * Driver Definitions
 *****************************************************************************/

#define MSM_IDLE_STATS_DRIVER_NAME "msm_idle_stats"

static dev_t msm_idle_stats_dev_nr;
static struct cdev msm_idle_stats_cdev;
static struct class *msm_idle_stats_class;

/******************************************************************************
 * Device Definitions
 *****************************************************************************/

struct msm_idle_stats_device {
	unsigned int cpu;
	struct mutex mutex;
	struct notifier_block notifier;

	int64_t collection_expiration;
	struct msm_idle_stats stats;
	struct hrtimer timer;

	wait_queue_head_t wait_q;
	atomic_t collecting;
};

static DEFINE_SPINLOCK(msm_idle_stats_devs_lock);
static DEFINE_PER_CPU(struct msm_idle_stats_device *, msm_idle_stats_devs);

/******************************************************************************
 *
 *****************************************************************************/

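/*
 * Clamp a measured interval to the range the stats structure can hold:
 * at least 1 us (back-to-back clock reads can yield a zero delta) and
 * at most UINT_MAX us, since intervals are reported as __u32 values.
 */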
static inline int64_t msm_idle_stats_bound_interval(int64_t interval)
{
	if (interval <= 0)
		return 1;

	if (interval > UINT_MAX)
		return UINT_MAX;

	return interval;
}

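/*
 * hrtimer callback, pinned to the CPU being profiled. It fires when the
 * busy timer or the overall collection timer expires, records the
 * reason in stats.event, and wakes the reader blocked in
 * msm_idle_stats_collect(). If the timer has migrated to another CPU,
 * the collection is abandoned and the migration is reported instead.
 */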
static enum hrtimer_restart msm_idle_stats_timer(struct hrtimer *timer)
{
	struct msm_idle_stats_device *stats_dev;
	unsigned int cpu;
	int64_t now;
	int64_t interval;

	stats_dev = container_of(timer, struct msm_idle_stats_device, timer);
	cpu = get_cpu();

	if (cpu != stats_dev->cpu) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_MIGRATION)
			pr_info("%s: timer migrated from cpu%u to cpu%u\n",
				__func__, stats_dev->cpu, cpu);

		stats_dev->stats.event = MSM_IDLE_STATS_EVENT_TIMER_MIGRATED;
		goto timer_exit;
	}

	now = ktime_to_us(ktime_get());
	interval = now - stats_dev->stats.last_busy_start;

	if (stats_dev->stats.busy_timer > 0 &&
			interval >= stats_dev->stats.busy_timer - 1)
		stats_dev->stats.event =
			MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED;
	else
		stats_dev->stats.event =
			MSM_IDLE_STATS_EVENT_COLLECTION_TIMER_EXPIRED;

timer_exit:
	atomic_set(&stats_dev->collecting, 0);
	wake_up_interruptible(&stats_dev->wait_q);

	put_cpu();
	return HRTIMER_NORESTART;
}

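/*
 * Called on the profiled CPU just before it enters idle. Ends the
 * current busy interval: cancels the pending busy/collection timer,
 * records the elapsed busy time, and stamps the start of the idle
 * interval.
 */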
static void msm_idle_stats_pre_idle(struct msm_idle_stats_device *stats_dev)
{
	int64_t now;
	int64_t interval;

	if (smp_processor_id() != stats_dev->cpu) {
		WARN_ON(1);
		return;
	}

	if (!atomic_read(&stats_dev->collecting))
		return;

	hrtimer_cancel(&stats_dev->timer);

	now = ktime_to_us(ktime_get());
	interval = now - stats_dev->stats.last_busy_start;
	interval = msm_idle_stats_bound_interval(interval);

	stats_dev->stats.busy_intervals[stats_dev->stats.nr_collected]
		= (__u32) interval;
	stats_dev->stats.last_idle_start = now;
}

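/*
 * Called on the profiled CPU right after it leaves idle. Records the
 * completed idle interval, then either finishes the collection (buffer
 * full or collection window expired) or re-arms the pinned hrtimer for
 * whichever comes first: the remaining collection time or the busy
 * timer.
 */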
static void msm_idle_stats_post_idle(struct msm_idle_stats_device *stats_dev)
{
	int64_t now;
	int64_t interval;
	int64_t timer_interval;
	int rc;

	if (smp_processor_id() != stats_dev->cpu) {
		WARN_ON(1);
		return;
	}

	if (!atomic_read(&stats_dev->collecting))
		return;

	now = ktime_to_us(ktime_get());
	interval = now - stats_dev->stats.last_idle_start;
	interval = msm_idle_stats_bound_interval(interval);

	stats_dev->stats.idle_intervals[stats_dev->stats.nr_collected]
		= (__u32) interval;
	stats_dev->stats.nr_collected++;
	stats_dev->stats.last_busy_start = now;

	if (stats_dev->stats.nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS) {
		stats_dev->stats.event = MSM_IDLE_STATS_EVENT_COLLECTION_FULL;
		goto post_idle_collection_done;
	}

	timer_interval = stats_dev->collection_expiration - now;
	if (timer_interval <= 0) {
		stats_dev->stats.event =
			MSM_IDLE_STATS_EVENT_COLLECTION_TIMER_EXPIRED;
		goto post_idle_collection_done;
	}

	if (stats_dev->stats.busy_timer > 0 &&
			timer_interval > stats_dev->stats.busy_timer)
		timer_interval = stats_dev->stats.busy_timer;

	rc = hrtimer_start(&stats_dev->timer,
		ktime_set(0, timer_interval * 1000), HRTIMER_MODE_REL_PINNED);
	WARN_ON(rc);

	return;

post_idle_collection_done:
	atomic_set(&stats_dev->collecting, 0);
	wake_up_interruptible(&stats_dev->wait_q);
}

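/*
 * cpuidle notifier callback: an MSM_CPUIDLE_STATE_EXIT event marks the
 * end of an idle period, anything else marks the start of one.
 */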
static int msm_idle_stats_notified(struct notifier_block *nb,
	unsigned long val, void *v)
{
	struct msm_idle_stats_device *stats_dev = container_of(
		nb, struct msm_idle_stats_device, notifier);

	if (val == MSM_CPUIDLE_STATE_EXIT)
		msm_idle_stats_post_idle(stats_dev);
	else
		msm_idle_stats_pre_idle(stats_dev);

	return 0;
}

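/*
 * Implements MSM_IDLE_STATS_IOC_COLLECT. Copies the caller's request
 * in, starts a collection on the device's CPU (the caller must already
 * be running there, or -EACCES is returned), sleeps until the timer
 * callback or the post-idle hook signals completion, and copies the
 * gathered intervals back out.
 */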
static int msm_idle_stats_collect(struct file *filp,
	unsigned int cmd, unsigned long arg)
{
	struct msm_idle_stats_device *stats_dev;
	struct msm_idle_stats *stats;
	int rc;

	stats_dev = (struct msm_idle_stats_device *) filp->private_data;
	stats = &stats_dev->stats;

	rc = mutex_lock_interruptible(&stats_dev->mutex);
	if (rc) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_SIGNAL)
			pr_info("%s: interrupted while waiting on device "
				"mutex\n", __func__);

		rc = -EINTR;
		goto collect_exit;
	}

	if (atomic_read(&stats_dev->collecting)) {
		pr_err("%s: inconsistent state\n", __func__);
		rc = -EBUSY;
		goto collect_unlock_exit;
	}

	rc = copy_from_user(stats, (void __user *)arg, sizeof(*stats));
	if (rc) {
		rc = -EFAULT;
		goto collect_unlock_exit;
	}

	if (stats->nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS ||
			stats->busy_timer > MSM_IDLE_STATS_MAX_TIMER ||
			stats->collection_timer > MSM_IDLE_STATS_MAX_TIMER) {
		rc = -EINVAL;
		goto collect_unlock_exit;
	}

	if (get_cpu() != stats_dev->cpu) {
		put_cpu();
		rc = -EACCES;
		goto collect_unlock_exit;
	}

	/*
	 * When collection_timer == 0, stop collecting at the next
	 * post idle.
	 */
	stats_dev->collection_expiration =
		ktime_to_us(ktime_get()) + stats->collection_timer;

	/*
	 * Enable collection before starting any timer.
	 */
	atomic_set(&stats_dev->collecting, 1);

	/*
	 * When busy_timer == 0, do not set any busy timer.
	 */
	if (stats->busy_timer > 0) {
		rc = hrtimer_start(&stats_dev->timer,
			ktime_set(0, stats->busy_timer * 1000),
			HRTIMER_MODE_REL_PINNED);
		WARN_ON(rc);
	}

	put_cpu();
	if (wait_event_interruptible(stats_dev->wait_q,
			!atomic_read(&stats_dev->collecting))) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_SIGNAL)
			pr_info("%s: interrupted while waiting on "
				"collection\n", __func__);

		hrtimer_cancel(&stats_dev->timer);
		atomic_set(&stats_dev->collecting, 0);

		rc = -EINTR;
		goto collect_unlock_exit;
	}

	stats->return_timestamp = ktime_to_us(ktime_get());

	rc = copy_to_user((void __user *)arg, stats, sizeof(*stats));
	if (rc) {
		rc = -EFAULT;
		goto collect_unlock_exit;
	}

collect_unlock_exit:
	mutex_unlock(&stats_dev->mutex);

collect_exit:
	return rc;
}

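/*
 * One device node exists per CPU; the minor number selects which CPU is
 * profiled. open() allocates the per-CPU state, claims the CPU's slot
 * (only one opener per CPU at a time), and registers for its cpuidle
 * notifications.
 */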
static int msm_idle_stats_open(struct inode *inode, struct file *filp)
{
	struct msm_idle_stats_device *stats_dev;
	int rc;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	rc = nonseekable_open(inode, filp);
	if (rc) {
		pr_err("%s: failed to set nonseekable\n", __func__);
		goto open_bail;
	}

	stats_dev = kzalloc(sizeof(*stats_dev), GFP_KERNEL);
	if (!stats_dev) {
		pr_err("%s: failed to allocate device struct\n", __func__);
		rc = -ENOMEM;
		goto open_bail;
	}

	stats_dev->cpu = MINOR(inode->i_rdev);
	mutex_init(&stats_dev->mutex);
	stats_dev->notifier.notifier_call = msm_idle_stats_notified;
	hrtimer_init(&stats_dev->timer,
		CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	stats_dev->timer.function = msm_idle_stats_timer;
	init_waitqueue_head(&stats_dev->wait_q);
	atomic_set(&stats_dev->collecting, 0);

	filp->private_data = stats_dev;

	/*
	 * Make sure only one device exists per cpu.
	 */
	spin_lock(&msm_idle_stats_devs_lock);
	if (per_cpu(msm_idle_stats_devs, stats_dev->cpu)) {
		spin_unlock(&msm_idle_stats_devs_lock);
		rc = -EBUSY;
		goto open_free_bail;
	}

	per_cpu(msm_idle_stats_devs, stats_dev->cpu) = stats_dev;
	spin_unlock(&msm_idle_stats_devs_lock);

	rc = msm_cpuidle_register_notifier(stats_dev->cpu,
		&stats_dev->notifier);
	if (rc) {
		pr_err("%s: failed to register idle notification\n", __func__);
		goto open_null_bail;
	}

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: done\n", __func__);
	return 0;

open_null_bail:
	spin_lock(&msm_idle_stats_devs_lock);
	per_cpu(msm_idle_stats_devs, stats_dev->cpu) = NULL;
	spin_unlock(&msm_idle_stats_devs_lock);

open_free_bail:
	kfree(stats_dev);

open_bail:
	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: exit, %d\n", __func__, rc);
	return rc;
}

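/*
 * Tears down what open() set up: unregisters the cpuidle notifier,
 * releases the CPU's slot, cancels any pending timer, and frees the
 * per-CPU state.
 */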
static int msm_idle_stats_release(struct inode *inode, struct file *filp)
{
	struct msm_idle_stats_device *stats_dev;
	int rc;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	stats_dev = (struct msm_idle_stats_device *) filp->private_data;
	rc = msm_cpuidle_unregister_notifier(stats_dev->cpu,
		&stats_dev->notifier);
	WARN_ON(rc);

	spin_lock(&msm_idle_stats_devs_lock);
	per_cpu(msm_idle_stats_devs, stats_dev->cpu) = NULL;
	spin_unlock(&msm_idle_stats_devs_lock);
	filp->private_data = NULL;

	hrtimer_cancel(&stats_dev->timer);
	kfree(stats_dev);

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: done\n", __func__);
	return 0;
}

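/*
 * MSM_IDLE_STATS_IOC_COLLECT is the only supported ioctl; anything else
 * gets -ENOTTY.
 */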
static long msm_idle_stats_ioctl(struct file *filp, unsigned int cmd,
	unsigned long arg)
{
	int rc;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	switch (cmd) {
	case MSM_IDLE_STATS_IOC_COLLECT:
		rc = msm_idle_stats_collect(filp, cmd, arg);
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: exit, %d\n", __func__, rc);
	return rc;
}

/******************************************************************************
 *
 *****************************************************************************/

static const struct file_operations msm_idle_stats_fops = {
	.owner = THIS_MODULE,
	.open = msm_idle_stats_open,
	.release = msm_idle_stats_release,
	.unlocked_ioctl = msm_idle_stats_ioctl,
};

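/*
 * A minimal sketch of the userspace side, for illustration only. It
 * assumes the device node appears as /dev/msm_idle_stats<N> (udev may
 * name it differently) and that struct msm_idle_stats and
 * MSM_IDLE_STATS_IOC_COLLECT come from an exported copy of
 * "idle_stats.h". Timer fields appear to be in microseconds, and the
 * caller must pin itself to the CPU being profiled or the ioctl fails
 * with -EACCES.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include "idle_stats.h"
 *
 *	int collect_cpu0(void)
 *	{
 *		struct msm_idle_stats stats;
 *		cpu_set_t mask;
 *		unsigned int i;
 *		int fd;
 *
 *		// Run on the CPU this device node profiles.
 *		CPU_ZERO(&mask);
 *		CPU_SET(0, &mask);
 *		if (sched_setaffinity(0, sizeof(mask), &mask))
 *			return -1;
 *
 *		fd = open("/dev/msm_idle_stats0", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&stats, 0, sizeof(stats));
 *		stats.busy_timer = 0;			// no busy timeout
 *		stats.collection_timer = 1000000;	// collect for ~1 s
 *
 *		if (ioctl(fd, MSM_IDLE_STATS_IOC_COLLECT, &stats) == 0)
 *			for (i = 0; i < stats.nr_collected; i++)
 *				printf("busy %u us, idle %u us\n",
 *					stats.busy_intervals[i],
 *					stats.idle_intervals[i]);
 *
 *		close(fd);
 *		return 0;
 *	}
 */
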
static int __init msm_idle_stats_init(void)
{
	unsigned int nr_cpus = num_possible_cpus();
	struct device *dev;
	int rc;
	int i;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	rc = alloc_chrdev_region(&msm_idle_stats_dev_nr,
		0, nr_cpus, MSM_IDLE_STATS_DRIVER_NAME);
	if (rc) {
		pr_err("%s: failed to allocate device number, rc %d\n",
			__func__, rc);
		goto init_bail;
	}

	msm_idle_stats_class = class_create(THIS_MODULE,
		MSM_IDLE_STATS_DRIVER_NAME);
	if (IS_ERR(msm_idle_stats_class)) {
		pr_err("%s: failed to create device class\n", __func__);
		rc = PTR_ERR(msm_idle_stats_class);
		goto init_unreg_bail;
	}

	for (i = 0; i < nr_cpus; i++) {
		dev = device_create(msm_idle_stats_class, NULL,
			msm_idle_stats_dev_nr + i, NULL,
			MSM_IDLE_STATS_DRIVER_NAME "%d", i);

		/* device_create() returns ERR_PTR() on failure, not NULL */
		if (IS_ERR(dev)) {
			pr_err("%s: failed to create device %d\n",
				__func__, i);
			rc = PTR_ERR(dev);
			goto init_remove_bail;
		}
	}

	cdev_init(&msm_idle_stats_cdev, &msm_idle_stats_fops);
	msm_idle_stats_cdev.owner = THIS_MODULE;

	/*
	 * Call cdev_add() last, after everything else is initialized and
	 * the driver is ready to accept system calls.
	 */
	rc = cdev_add(&msm_idle_stats_cdev, msm_idle_stats_dev_nr, nr_cpus);
	if (rc) {
		pr_err("%s: failed to register char device, rc %d\n",
			__func__, rc);
		goto init_remove_bail;
	}

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: done\n", __func__);
	return 0;

init_remove_bail:
	for (i = i - 1; i >= 0; i--)
		device_destroy(
			msm_idle_stats_class, msm_idle_stats_dev_nr + i);

	class_destroy(msm_idle_stats_class);

init_unreg_bail:
	unregister_chrdev_region(msm_idle_stats_dev_nr, nr_cpus);

init_bail:
	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: exit, %d\n", __func__, rc);
	return rc;
}

static void __exit msm_idle_stats_exit(void)
{
	unsigned int nr_cpus = num_possible_cpus();
	int i;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	cdev_del(&msm_idle_stats_cdev);

	for (i = nr_cpus - 1; i >= 0; i--)
		device_destroy(
			msm_idle_stats_class, msm_idle_stats_dev_nr + i);

	class_destroy(msm_idle_stats_class);
	unregister_chrdev_region(msm_idle_stats_dev_nr, nr_cpus);

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: done\n", __func__);
}

module_init(msm_idle_stats_init);
module_exit(msm_idle_stats_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("idle stats driver");
MODULE_VERSION("1.0");