/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/idle_stats_device.h>

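/* All registered idle stats devices, protected by device_list_lock. */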
DEFINE_MUTEX(device_list_lock);
LIST_HEAD(device_list);

static ktime_t us_to_ktime(__u32 us)
{
        return ns_to_ktime((u64)us * NSEC_PER_USEC);
}

static struct msm_idle_stats_device *_device_from_minor(unsigned int minor)
{
        struct msm_idle_stats_device *device, *ret = NULL;

        mutex_lock(&device_list_lock);
        list_for_each_entry(device, &device_list, list) {
                if (minor == device->miscdev.minor) {
                        ret = device;
                        break;
                }
        }
        mutex_unlock(&device_list_lock);
        return ret;
}

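/*
 * Record a new event bit in the active stats buffer and wake any poll()
 * waiters, but only on the first event so a burst of events causes a single
 * wakeup.  Callers hold device->lock, or otherwise have exclusive access to
 * the event word as the busy timer callback does.
 */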
static void update_event(struct msm_idle_stats_device *device, __u32 event)
{
        __u32 wake_up = !device->stats->event;

        device->stats->event |= event;
        if (wake_up)
                wake_up_interruptible(&device->wait);
}

static enum hrtimer_restart msm_idle_stats_busy_timer(struct hrtimer *timer)
{
        struct msm_idle_stats_device *device =
                container_of(timer, struct msm_idle_stats_device, busy_timer);

        /*
         * This is the only case where the event is modified without holding
         * the device lock.  However, since the timer is cancelled in the
         * other cases, we are assured exclusive access to the event here.
         */
        hrtimer_set_expires(&device->busy_timer, us_to_ktime(0));
        update_event(device, MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED);
        return HRTIMER_NORESTART;
}

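/*
 * (Re)arm the busy timer with the given relative timeout.  The timer is only
 * started if no busy-timer-expired or collection-full event is pending and
 * the requested timeout is non-zero.
 */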
static void start_busy_timer(struct msm_idle_stats_device *device,
                        ktime_t relative_time)
{
        hrtimer_cancel(&device->busy_timer);
        hrtimer_set_expires(&device->busy_timer, us_to_ktime(0));
        if (!((device->stats->event &
               MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED) ||
              (device->stats->event &
               MSM_IDLE_STATS_EVENT_COLLECTION_FULL))) {
                if (ktime_to_us(relative_time) > 0) {
                        hrtimer_start(&device->busy_timer, relative_time,
                                      HRTIMER_MODE_REL);
                }
        }
}

static unsigned int msm_idle_stats_device_poll(struct file *file,
                                               poll_table *wait)
{
        struct msm_idle_stats_device *device = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &device->wait, wait);
        if (device->stats->event)
                mask = POLLIN | POLLRDNORM;
        return mask;
}

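/*
 * Append one idle pulse to the active collection, cancelling any pending
 * busy timer first.  When the collection reaches three quarters of
 * MSM_IDLE_STATS_NR_MAX_INTERVALS a "nearly full" event is raised; when it
 * fills completely a "full" event is raised and further samples are
 * dropped.  Called with device->lock held.
 */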
static void msm_idle_stats_add_sample(struct msm_idle_stats_device *device,
                                struct msm_idle_pulse *pulse)
{
        hrtimer_cancel(&device->busy_timer);
        hrtimer_set_expires(&device->busy_timer, us_to_ktime(0));
        if (device->stats->nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS)
                return;

        device->stats->pulse_chain[device->stats->nr_collected] = *pulse;
        device->stats->nr_collected++;

        if (device->stats->nr_collected == MSM_IDLE_STATS_NR_MAX_INTERVALS) {
                update_event(device, MSM_IDLE_STATS_EVENT_COLLECTION_FULL);
        } else if (device->stats->nr_collected ==
                        ((MSM_IDLE_STATS_NR_MAX_INTERVALS * 3) / 4)) {
                update_event(device,
                        MSM_IDLE_STATS_EVENT_COLLECTION_NEARLY_FULL);
        }
}

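/*
 * Hand the collected samples to userspace.  The device keeps two stats
 * buffers; the active one is swapped out under device->lock so new samples
 * go to the other buffer while this one is copied out.  A final pulse is
 * taken via the client's get_sample() callback and appended if there is
 * room, and only the populated part of the pulse chain is transferred.
 */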
static long ioctl_read_stats(struct msm_idle_stats_device *device,
                unsigned long arg)
{
        int remaining;
        int requested;
        struct msm_idle_pulse pulse;
        struct msm_idle_read_stats *stats;
        __s64 remaining_time =
                ktime_to_us(hrtimer_get_remaining(&device->busy_timer));

        device->get_sample(device, &pulse);
        spin_lock(&device->lock);
        hrtimer_cancel(&device->busy_timer);
        stats = device->stats;
        if (stats == &device->stats_vector[0])
                device->stats = &device->stats_vector[1];
        else
                device->stats = &device->stats_vector[0];
        device->stats->event = (stats->event &
                        MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED);
        device->stats->nr_collected = 0;
        spin_unlock(&device->lock);
        if (stats->nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS) {
                stats->nr_collected = MSM_IDLE_STATS_NR_MAX_INTERVALS;
        } else {
                stats->pulse_chain[stats->nr_collected] = pulse;
                stats->nr_collected++;
                if (stats->nr_collected == MSM_IDLE_STATS_NR_MAX_INTERVALS)
                        stats->event |= MSM_IDLE_STATS_EVENT_COLLECTION_FULL;
                else if (stats->nr_collected ==
                                ((MSM_IDLE_STATS_NR_MAX_INTERVALS * 3) / 4))
                        stats->event |=
                                MSM_IDLE_STATS_EVENT_COLLECTION_NEARLY_FULL;
        }
        if (remaining_time < 0) {
                stats->busy_timer_remaining = 0;
        } else {
                stats->busy_timer_remaining = remaining_time;
                if ((__s64)stats->busy_timer_remaining != remaining_time)
                        stats->busy_timer_remaining = -1;
        }
        stats->return_timestamp = ktime_to_us(ktime_get());
        requested = (sizeof(*stats) - sizeof(stats->pulse_chain)) +
                        (sizeof(stats->pulse_chain[0]) * stats->nr_collected);
        remaining = copy_to_user((void __user *)arg, stats, requested);
        if (remaining > 0)
                return -EFAULT;

        return 0;
}

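/*
 * Accept new busy timer values from userspace: next_busy_timer becomes the
 * interval used after the next idle period ends, and busy_timer is armed
 * immediately unless the device is currently in an idle period.
 */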
static long ioctl_write_stats(struct msm_idle_stats_device *device,
                unsigned long arg)
{
        struct msm_idle_write_stats stats;
        int remaining;
        int ret = 0;

        remaining = copy_from_user(&stats, (void __user *)arg, sizeof(stats));
        if (remaining > 0) {
                ret = -EFAULT;
        } else {
                spin_lock(&device->lock);
                device->busy_timer_interval =
                        us_to_ktime(stats.next_busy_timer);
                if (ktime_to_us(device->idle_start) == 0)
                        start_busy_timer(device,
                                         us_to_ktime(stats.busy_timer));
                spin_unlock(&device->lock);
        }
        return ret;
}

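/**
 * msm_idle_stats_prepare_idle_start() - cancel the busy timer before idle
 * @device: the stats device about to enter idle
 *
 * Cancels any pending busy timer under the device lock.  If the idle entry
 * is subsequently abandoned, call msm_idle_stats_abort_idle_start() to
 * restore the timer.
 */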
void msm_idle_stats_prepare_idle_start(struct msm_idle_stats_device *device)
{
        spin_lock(&device->lock);
        hrtimer_cancel(&device->busy_timer);
        spin_unlock(&device->lock);
}
EXPORT_SYMBOL(msm_idle_stats_prepare_idle_start);

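/**
 * msm_idle_stats_abort_idle_start() - undo msm_idle_stats_prepare_idle_start()
 * @device: the stats device that did not enter idle after all
 *
 * Restarts the busy timer at its previously programmed expiry, if one was
 * set, so an aborted idle entry does not lose the pending timeout.
 */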
void msm_idle_stats_abort_idle_start(struct msm_idle_stats_device *device)
{
        spin_lock(&device->lock);
        if (ktime_to_us(hrtimer_get_expires(&device->busy_timer)) > 0)
                hrtimer_restart(&device->busy_timer);
        spin_unlock(&device->lock);
}
EXPORT_SYMBOL(msm_idle_stats_abort_idle_start);

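/**
 * msm_idle_stats_idle_start() - note that the monitored device went idle
 * @device: the stats device entering idle
 *
 * Records the idle entry time, cancels the busy timer, and remembers how
 * much of the busy timeout was left so it can be rearmed when the idle
 * period ends.
 */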
void msm_idle_stats_idle_start(struct msm_idle_stats_device *device)
{
        spin_lock(&device->lock);
        hrtimer_cancel(&device->busy_timer);
        device->idle_start = ktime_get();
        if (ktime_to_us(hrtimer_get_expires(&device->busy_timer)) > 0) {
                device->remaining_time =
                        hrtimer_get_remaining(&device->busy_timer);
                if (ktime_to_us(device->remaining_time) <= 0)
                        device->remaining_time = us_to_ktime(1);
        } else {
                device->remaining_time = us_to_ktime(0);
        }
        spin_unlock(&device->lock);
}
EXPORT_SYMBOL(msm_idle_stats_idle_start);

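/**
 * msm_idle_stats_idle_end() - note that the monitored device left idle
 * @device: the stats device leaving idle
 * @pulse: the idle pulse measured by the client for this idle period
 *
 * Records the pulse in the current collection.  If the busy timer expired
 * while idle, the expiration is converted into a busy-timer-expired-reset
 * event; otherwise the busy timer is rearmed with the configured interval,
 * clipped to the time that remained at idle start when the pulse reports a
 * non-zero wait interval.
 */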
void msm_idle_stats_idle_end(struct msm_idle_stats_device *device,
                                struct msm_idle_pulse *pulse)
{
        spin_lock(&device->lock);
        if (ktime_to_us(device->idle_start) != 0) {
                device->idle_start = us_to_ktime(0);
                msm_idle_stats_add_sample(device, pulse);
                if (device->stats->event &
                                MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED) {
                        device->stats->event &=
                                ~MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED;
                        update_event(device,
                                MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED_RESET);
                } else if (ktime_to_us(device->busy_timer_interval) > 0) {
                        ktime_t busy_timer = device->busy_timer_interval;

                        if ((pulse->wait_interval > 0) &&
                            (ktime_to_us(device->remaining_time) > 0) &&
                            (ktime_to_us(device->remaining_time) <
                             ktime_to_us(busy_timer)))
                                busy_timer = device->remaining_time;
                        start_busy_timer(device, busy_timer);
                }
        }
        spin_unlock(&device->lock);
}
EXPORT_SYMBOL(msm_idle_stats_idle_end);

static long msm_idle_stats_device_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
{
        struct msm_idle_stats_device *device = file->private_data;
        int ret;

        switch (cmd) {
        case MSM_IDLE_STATS_IOC_READ_STATS:
                ret = ioctl_read_stats(device, arg);
                break;
        case MSM_IDLE_STATS_IOC_WRITE_STATS:
                ret = ioctl_write_stats(device, arg);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

static int msm_idle_stats_device_release(struct inode *inode,
                                struct file *filep)
{
        return 0;
}

static int msm_idle_stats_device_open(struct inode *inode, struct file *filep)
{
        struct msm_idle_stats_device *device;

        device = _device_from_minor(iminor(inode));
        if (device == NULL)
                return -EPERM;

        filep->private_data = device;
        return 0;
}

static const struct file_operations msm_idle_stats_fops = {
        .open = msm_idle_stats_device_open,
        .release = msm_idle_stats_device_release,
        .unlocked_ioctl = msm_idle_stats_device_ioctl,
        .poll = msm_idle_stats_device_poll,
};

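/*
 * Registration and typical usage, a minimal sketch only: the client-side
 * names below (my_get_sample, my_stats) are hypothetical, and which other
 * fields need initialisation depends on struct msm_idle_stats_device as
 * declared in <linux/idle_stats_device.h>; this file only requires that
 * name and get_sample be set before registration.
 *
 *      static void my_get_sample(struct msm_idle_stats_device *device,
 *                                struct msm_idle_pulse *pulse)
 *      {
 *              // fill *pulse from the client's own idle/busy accounting
 *      }
 *
 *      static struct msm_idle_stats_device my_stats = {
 *              .name = "my_idle_stats",
 *              .get_sample = my_get_sample,
 *      };
 *
 *      ret = msm_idle_stats_register_device(&my_stats);
 *
 * The client then brackets each idle period with
 * msm_idle_stats_idle_start()/msm_idle_stats_idle_end(), optionally using
 * msm_idle_stats_prepare_idle_start()/msm_idle_stats_abort_idle_start()
 * around tentative idle entries, while userspace reads the samples through
 * the misc device node created for device->name.
 */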
int msm_idle_stats_register_device(struct msm_idle_stats_device *device)
{
        int ret = -ENOMEM;

        spin_lock_init(&device->lock);
        init_waitqueue_head(&device->wait);
        hrtimer_init(&device->busy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        device->busy_timer.function = msm_idle_stats_busy_timer;

        device->stats_vector[0].event = 0;
        device->stats_vector[0].nr_collected = 0;
        device->stats_vector[1].event = 0;
        device->stats_vector[1].nr_collected = 0;
        device->stats = &device->stats_vector[0];
        device->busy_timer_interval = us_to_ktime(0);

        mutex_lock(&device_list_lock);
        list_add(&device->list, &device_list);
        mutex_unlock(&device_list_lock);

        device->miscdev.minor = MISC_DYNAMIC_MINOR;
        device->miscdev.name = device->name;
        device->miscdev.fops = &msm_idle_stats_fops;

        ret = misc_register(&device->miscdev);
        if (ret)
                goto err_list;

        return ret;

err_list:
        mutex_lock(&device_list_lock);
        list_del(&device->list);
        mutex_unlock(&device_list_lock);
        return ret;
}
EXPORT_SYMBOL(msm_idle_stats_register_device);

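/**
 * msm_idle_stats_deregister_device() - remove a previously registered device
 * @device: the stats device to remove, or NULL (a no-op)
 *
 * Cancels the busy timer, unlinks the device from the registry, and removes
 * its misc device node.
 */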
int msm_idle_stats_deregister_device(struct msm_idle_stats_device *device)
{
        if (device == NULL)
                return 0;

        mutex_lock(&device_list_lock);
        spin_lock(&device->lock);
        hrtimer_cancel(&device->busy_timer);
        list_del(&device->list);
        spin_unlock(&device->lock);
        mutex_unlock(&device_list_lock);

        return misc_deregister(&device->miscdev);
}
EXPORT_SYMBOL(msm_idle_stats_deregister_device);