/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/pm.h>
17#include <linux/mutex.h>
18#include <linux/uaccess.h>
19#include <linux/debugfs.h>
20#include <linux/slab.h>
21#include <linux/delay.h>
22#include <linux/ctype.h>
23#include <linux/moduleparam.h>
24#include <linux/platform_device.h>
25#include <mach/socinfo.h>
26#if defined(CONFIG_MSM_RPM)
27#include "rpm_resources.h"
28#endif
Priyanka Mathur3abfd442013-01-11 12:58:51 -080029#if defined(CONFIG_MSM_RPM_SMD)
30#include "lpm_resources.h"
31#endif
Priyanka Mathur9a75ec52012-12-07 16:02:40 -080032#include "timer.h"
33#include "test-lpm.h"
34
35#define LPM_STATS_RESET "reset"
36#define LPM_TEST_ALL_LEVELS "lpm"
37#define LPM_TEST_LATENCIES "latency"
38#define LPM_TEST_CLEAR "clear"
39#define BUF_SIZE 200
40#define STAT_BUF_EXTRA_SIZE 500
41#define WAIT_FOR_XO 1
42#define COMM_BUF_SIZE 15
43#define INPUT_COUNT_BUF 10
44#define LPM_DEFAULT_CPU 0
45
/*
 * Append formatted output to 'buf', advancing the cursor and shrinking the
 * remaining 'size' so repeated invocations fill the buffer sequentially.
 * On truncation -- or an snprintf error (negative return), which previously
 * moved 'buf' backwards and grew 'size' -- 'size' is clamped to 0 so all
 * later invocations become no-ops.  Wrapped in do/while(0) so the macro is
 * a single statement and safe inside an unbraced if/else.
 */
#define SNPRINTF(buf, size, format, ...) \
do { \
	if (size > 0) { \
		int ret; \
		ret = snprintf(buf, size, format, ## __VA_ARGS__); \
		if (ret < 0 || ret >= size) { \
			buf += size; \
			size = 0; \
		} else { \
			buf += ret; \
			size -= ret; \
		} \
	} \
} while (0)
60
61static DEFINE_MUTEX(lpm_stats_mutex);
62
/*
 * Per-level statistics collected for the CPU selected by cpu_to_debug
 * while exercising its low power modes.
 */
struct lpm_level_stat {
	char level_name[BUF_SIZE];	/* human-readable level description */
	int64_t min_time;		/* shortest residency seen (ns); 0 = none yet */
	int64_t max_time;		/* longest residency seen (ns) */
	int64_t avg_time;		/* running average residency (ns) */
	int64_t exit_early;		/* exits shorter than time_overhead_us */
	int64_t count;			/* number of enter/exit pairs recorded */
	unsigned long min_threshold;	/* smallest sleep interval (usec) at
					 * which this level was entered during
					 * the latency test; 0 = not yet */
	uint32_t kernel_sleep_time;	/* kernel sleep length at that point */
	bool entered;			/* level entered at least once */
};
74
75static DEFINE_PER_CPU(struct lpm_level_stat *, lpm_levels);
76
77static struct dentry *lpm_stat;
78static struct dentry *lpm_ext_comm;
79static struct msm_rpmrs_level *lpm_supp_level;
80static int lpm_level_count;
81static int lpm_level_iter;
82static bool msm_lpm_use_qtimer;
83static unsigned long lpm_sleep_time;
84static bool lpm_latency_test;
85
86static unsigned int timer_interval = 5000;
87module_param_named(lpm_timer_interval_msec, timer_interval, uint,
88 S_IRUGO | S_IWUSR | S_IWGRP);
89
90static unsigned int latency_test_interval = 50;
91module_param_named(lpm_latency_timer_interval_usec, latency_test_interval, uint,
92 S_IRUGO | S_IWUSR | S_IWGRP);
93
94static unsigned int cpu_to_debug = LPM_DEFAULT_CPU;
95static int lpm_cpu_update(const char *val, const struct kernel_param *kp)
96{
97 int ret = 0;
98 unsigned int debug_val;
99
100 ret = kstrtouint(val, 10, &debug_val);
101 if ((ret < 0) || (debug_val >= num_possible_cpus()))
102 return -EINVAL;
103 cpu_to_debug = debug_val;
104 return ret;
105}
106
107static struct kernel_param_ops cpu_debug_events = {
108 .set = lpm_cpu_update,
109};
110
111module_param_cb(cpu_to_debug, &cpu_debug_events, &cpu_to_debug,
112 S_IRUGO | S_IWUSR | S_IWGRP);
113
114static void lpm_populate_name(struct lpm_level_stat *stat,
115 struct msm_rpmrs_level *supp)
116{
117 char nm[BUF_SIZE] = {0};
118 char default_buf[20];
119
120 switch (supp->sleep_mode) {
121 case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
122 strlcat(nm, "WFI ", BUF_SIZE);
123 break;
124 case MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT:
125 strlcat(nm, "WFI voltage Rampdown ", BUF_SIZE);
126 break;
127 case MSM_PM_SLEEP_MODE_RETENTION:
128 strlcat(nm, "Retention ", BUF_SIZE);
129 break;
130 case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
131 strlcat(nm, "Standalone Power collapse ", BUF_SIZE);
132 break;
133 case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
134 strlcat(nm, "Idle Power collapse ", BUF_SIZE);
135 break;
136 case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND:
137 strlcat(nm, "Suspend Power collapse ", BUF_SIZE);
138 break;
139 default:
140 strlcat(nm, "Invalid Mode ", BUF_SIZE);
141 break;
142 }
143
144 switch (msm_pm_get_pxo(&(supp->rs_limits))) {
145 case MSM_PM(PXO_OFF):
146 strlcat(nm, "XO: OFF ", BUF_SIZE);
147 break;
148 case MSM_PM(PXO_ON):
149 strlcat(nm, "XO: ON ", BUF_SIZE);
150 break;
151 default:
152 snprintf(default_buf, sizeof(default_buf),
153 "XO : %d ", msm_pm_get_pxo(&(supp->rs_limits)));
154 strlcat(nm, default_buf , BUF_SIZE);
155 break;
156 }
157
158 switch (msm_pm_get_l2_cache(&(supp->rs_limits))) {
159 case MSM_PM(L2_CACHE_HSFS_OPEN):
160 strlcat(nm, "L2: HSFS ", BUF_SIZE);
161 break;
162 case MSM_PM(L2_CACHE_GDHS):
163 strlcat(nm, "L2: GDHS ", BUF_SIZE);
164 break;
165 case MSM_PM(L2_CACHE_RETENTION):
166 strlcat(nm, "L2: Retention ", BUF_SIZE);
167 break;
168 case MSM_PM(L2_CACHE_ACTIVE):
169 strlcat(nm, "L2: Active ", BUF_SIZE);
170 break;
171 default:
172 snprintf(default_buf, sizeof(default_buf),
173 "L2 : %d ", msm_pm_get_l2_cache(&(supp->rs_limits)));
174 strlcat(nm, default_buf , BUF_SIZE);
175 break;
176 }
177
178 snprintf(default_buf, sizeof(default_buf),
179 "Vdd_mem : %d ", msm_pm_get_vdd_mem(&(supp->rs_limits)));
180 strlcat(nm, default_buf , BUF_SIZE);
181
182 snprintf(default_buf, sizeof(default_buf),
183 "Vdd_dig : %d ", msm_pm_get_vdd_dig(&(supp->rs_limits)));
184 strlcat(nm, default_buf , BUF_SIZE);
185
186 strlcpy(stat->level_name, nm, strnlen(nm, BUF_SIZE));
187}
188
189static int64_t msm_lpm_get_time(void)
190{
191 if (msm_lpm_use_qtimer)
192 return ktime_to_ns(ktime_get());
193
194 return msm_timer_get_sclk_time(NULL);
195}
196
197static bool lpm_get_level(void *v, unsigned int *ct)
198{
199 bool ret = false;
200 int it;
201 struct msm_rpmrs_level *level_enter;
202
203 level_enter = container_of(((struct msm_lpm_sleep_data *)v)->limits,
204 struct msm_rpmrs_level, rs_limits);
205 if (level_enter) {
206 for (it = 0; it < lpm_level_count; it++)
207 if (!memcmp(level_enter , lpm_supp_level + it,
208 sizeof(struct msm_rpmrs_level))) {
209 *ct = it;
210 ret = true;
211 break;
212 }
213 }
214 return ret;
215}
216
/*
 * LPM notifier callback.  In the normal test it pairs an ENTER timestamp
 * with the following EXIT to accumulate residency statistics for the level
 * selected by lpm_level_iter; in the latency test it records, per entered
 * level, the first sleep interval that allowed entry.  The static 'time'
 * holds the ENTER timestamp between invocations (0 = no pending ENTER).
 */
static int lpm_callback(struct notifier_block *self, unsigned long cmd,
				void *sleep_data)
{
	static int64_t time;
	unsigned int ct;
	struct lpm_level_stat *stats;
	stats = per_cpu(lpm_levels, cpu_to_debug);
	/* Update the stats and get the start/stop time */
	if (cmd == MSM_LPM_STATE_ENTER && !lpm_latency_test) {
		/* Timestamp the entry; the EXIT branch computes residency */
		time = msm_lpm_get_time();
		stats[lpm_level_iter].entered = true;
	} else if ((cmd == MSM_LPM_STATE_EXIT) && (time)
			&& (!lpm_latency_test)) {
		int64_t time1;
		time1 = msm_lpm_get_time();
		time = time1 - time;

		/* min_time == 0 doubles as "not recorded yet" */
		if ((time < stats[lpm_level_iter].min_time) ||
			(!stats[lpm_level_iter].min_time))
			stats[lpm_level_iter].min_time = time;

		if (time > stats[lpm_level_iter].max_time)
			stats[lpm_level_iter].max_time = time;

		/* Running average: (avg * count + new) / (count + 1);
		 * do_div leaves the quotient in time1 */
		time1 = stats[lpm_level_iter].avg_time *
				stats[lpm_level_iter].count + time;
		do_div(time1, ++(stats[lpm_level_iter].count));

		stats[lpm_level_iter].avg_time = time1;
		/* Convert residency to usec to compare with overhead */
		do_div(time, NSEC_PER_USEC);
		if (time < lpm_supp_level[lpm_level_iter].
				time_overhead_us)
			stats[lpm_level_iter].exit_early++;
		/* Clear the pending-ENTER marker */
		time = 0;
	} else if (cmd == MSM_LPM_STATE_ENTER && lpm_latency_test) {

		struct msm_lpm_sleep_data *data = sleep_data;
		/* Record only the first (smallest) interval at which this
		 * level was entered, and only if the kernel actually slept
		 * no longer than the programmed interval */
		if ((lpm_get_level(sleep_data, &ct)) &&
			(stats[ct].min_threshold == 0) &&
			data->kernel_sleep <= lpm_sleep_time) {

			stats[ct].min_threshold = lpm_sleep_time;
			stats[ct].kernel_sleep_time =
				data->kernel_sleep;
		}
	}
	return 0;
}
265
266static struct notifier_block lpm_idle_nb = {
267 .notifier_call = lpm_callback,
268};
269
/*
 * Run the residency test.  Registers the LPM notifier for the level
 * indexed by lpm_level_iter, idles for timer_interval msec, then either
 * advances to the next level (when lpm_level_test == lpm_level_count,
 * i.e. "test all levels") or stops.  A zero timer_interval leaves the
 * notifier registered indefinitely (cleared via the "clear" command).
 */
static void lpm_test_initiate(int lpm_level_test)
{
	int test_ret;

	/* This will communicate to 'stat' debugfs to skip latency printing*/
	lpm_sleep_time = 0;
	lpm_latency_test = false;
	/* Unregister any infinitely registered level*/
	msm_lpm_unregister_notifier(cpu_to_debug, &lpm_idle_nb);

	/* Register/Unregister for Notification */
	while (lpm_level_iter < lpm_level_count) {
		test_ret = msm_lpm_register_notifier(cpu_to_debug,
				lpm_level_iter, &lpm_idle_nb, false);
		if (test_ret < 0) {
			pr_err("%s: Registering notifier failed\n", __func__);
			return;
		}
		/* timer_interval == 0: stay registered and bail out */
		if (!timer_interval)
			break;
		msleep(timer_interval);
		msm_lpm_unregister_notifier(cpu_to_debug, &lpm_idle_nb);
		if (lpm_level_test == lpm_level_count)
			lpm_level_iter++;
		else
			break;
	}
}
298
/*
 * Run the latency test: repeatedly register the notifier for all levels
 * while stepping the sleep interval up by latency_test_interval usec each
 * pass, until max_time (usec) is reached.  lpm_callback records, per level,
 * the first interval at which the level was actually entered.
 *
 * NOTE(review): the early return on registration failure leaves
 * lpm_latency_test set to true -- confirm whether that is intended.
 */
static void lpm_latency_test_initiate(unsigned long max_time)
{
	int test_ret;
	lpm_latency_test = true;
	lpm_sleep_time = latency_test_interval;

	msm_lpm_unregister_notifier(cpu_to_debug, &lpm_idle_nb);
	if (max_time > lpm_sleep_time) {

		do {
			test_ret = msm_lpm_register_notifier(cpu_to_debug,
					lpm_level_count + 1,
					&lpm_idle_nb, true);
			if (test_ret) {
				pr_err("%s: Registering notifier failed\n",
					__func__);
				return;
			}
			usleep(lpm_sleep_time);
			/* Unregister so no latency update can land while the
			 * timer value is being stepped to the next interval */
			msm_lpm_unregister_notifier(cpu_to_debug,
						&lpm_idle_nb);
			lpm_sleep_time += latency_test_interval;
		} while (lpm_sleep_time < max_time);
	} else
		pr_err("%s: Invalid time interval specified\n", __func__);

	lpm_latency_test = false;
}
329
330static ssize_t lpm_test_comm_read(struct file *fp, char __user *user_buffer,
331 size_t buffer_length, loff_t *position)
332{
333 int i = 0;
334 int count = buffer_length;
335 int alloc_size = 100 * lpm_level_count;
336 char *temp_buf;
337 char *comm_buf;
338 ssize_t ret;
339
340 comm_buf = kzalloc(alloc_size, GFP_KERNEL);
341 if (!comm_buf) {
342 pr_err("%s:Memory alloc failed\n", __func__);
343 ret = 0;
344 goto com_read_failed;
345 }
346 temp_buf = comm_buf;
347
348 SNPRINTF(temp_buf, count, "Low power modes available:\n");
349
350 for (i = 0; i < lpm_level_count; i++)
351 SNPRINTF(temp_buf, count, "%d. %s\n", i,
352 per_cpu(lpm_levels, cpu_to_debug)[i].level_name);
353
354 SNPRINTF(temp_buf, count, "%d. MSM test all lpm\n", i++);
355 SNPRINTF(temp_buf, count, "%d. MSM determine latency\n", i);
356
357 ret = simple_read_from_buffer(user_buffer, buffer_length - count,
358 position, comm_buf, alloc_size);
359 kfree(comm_buf);
360
361com_read_failed:
362 return ret;
363}
364
/*
 * Strip leading and trailing whitespace from 'time_buf' in place, bounding
 * all scans to INPUT_COUNT_BUF bytes.  Returns a pointer to the first
 * non-space character, or NULL when the bounded string is empty or all
 * whitespace.  Trailing whitespace is overwritten with '\0'.
 */
char *trimspaces(char *time_buf)
{
	int len = 0;
	char *tail;

	/* Bounded length scan (strnlen equivalent) */
	while (len < INPUT_COUNT_BUF && time_buf[len] != '\0')
		len++;
	tail = time_buf + len;

	/* Cast to unsigned char: passing a plain char that is negative to
	 * isspace() is undefined behavior. */
	while ((time_buf != tail) && isspace((unsigned char)*time_buf))
		time_buf++;
	if (time_buf == tail)
		return NULL;

	/* Recompute the remaining length, then trim from the back. */
	len = 0;
	while (len < INPUT_COUNT_BUF && time_buf[len] != '\0')
		len++;
	tail = time_buf + len - 1;
	while (tail != time_buf && isspace((unsigned char)*tail))
		*tail-- = '\0';

	return time_buf;
}
387
388static ssize_t lpm_test_comm_write(struct file *fp, const char __user
389 *user_buffer, size_t count, loff_t *position)
390{
391 ssize_t ret;
392 int str_ret;
393 int lpm_level_test;
394 char *new_ptr;
395 char *comm_buf;
396
397 comm_buf = kzalloc(COMM_BUF_SIZE, GFP_KERNEL);
398 if (!comm_buf) {
399 pr_err("\'%s\': kzalloc failed\n", __func__);
400 return -EINVAL;
401 }
402
403 memset(comm_buf, '\0', COMM_BUF_SIZE);
404
405 ret = simple_write_to_buffer(comm_buf, COMM_BUF_SIZE, position,
406 user_buffer, count);
407 new_ptr = trimspaces(comm_buf);
408 if (!new_ptr) {
409 pr_err("%s: Test case number input invalid\n", __func__);
410 goto write_com_failed;
411 }
412
413 if (!memcmp(comm_buf, LPM_TEST_ALL_LEVELS,
414 sizeof(LPM_TEST_ALL_LEVELS) - 1)) {
415 lpm_level_test = lpm_level_count;
416 lpm_level_iter = 0;
417 lpm_test_initiate(lpm_level_test);
418 goto write_com_success;
419 } else if (!memcmp(comm_buf, LPM_TEST_LATENCIES,
420 sizeof(LPM_TEST_LATENCIES) - 1)) {
421 lpm_level_test = lpm_level_count + 1;
422 lpm_latency_test_initiate(timer_interval * USEC_PER_MSEC);
423 goto write_com_success;
424 } else if (!memcmp(comm_buf, LPM_TEST_CLEAR,
425 sizeof(LPM_TEST_CLEAR) - 1)) {
426 msm_lpm_unregister_notifier(cpu_to_debug, &lpm_idle_nb);
427 goto write_com_success;
428 }
429
430 str_ret = kstrtoint(new_ptr, 10, &lpm_level_test);
431 if ((str_ret) || (lpm_level_test > (lpm_level_count + 1)) ||
432 (lpm_level_test < 0))
433 goto write_com_failed;
434
435 lpm_level_iter = lpm_level_test;
436 lpm_test_initiate(lpm_level_test);
437 goto write_com_success;
438
439write_com_failed:
440 ret = -EINVAL;
441write_com_success:
442 kfree(comm_buf);
443 return ret;
444}
445
446static ssize_t lpm_test_stat_read(struct file *fp, char __user *user_buffer,
447 size_t buffer_length, loff_t *position)
448{
449 int i = 0;
450 int j = 0;
451 int count = buffer_length;
452 char *stat_buf;
453 char *stat_buf_start;
454 size_t stat_buf_size;
455 ssize_t ret;
456 int64_t min_ns;
457 int64_t max_ns;
458 int64_t avg_ns;
459 uint32_t min_ms;
460 uint32_t max_ms;
461 uint32_t avg_ms;
462
463 stat_buf_size = ((sizeof(struct lpm_level_stat) * lpm_level_count) +
464 STAT_BUF_EXTRA_SIZE);
465 stat_buf = kzalloc(stat_buf_size, GFP_KERNEL);
466 if (!stat_buf) {
467 pr_err("\'%s\': kzalloc failed\n", __func__);
468 return -EINVAL;
469 }
470 stat_buf_start = stat_buf;
471 mutex_lock(&lpm_stats_mutex);
472 memset(stat_buf, '\0', stat_buf_size);
473 SNPRINTF(stat_buf, count, "\n\nStats for CPU: %d\nTotal Levels: %d\n",
474 cpu_to_debug, lpm_level_count);
475 if (!lpm_sleep_time) {
476 SNPRINTF(stat_buf, count, "Level(s) failed: ");
477 for (i = 0 ; i < lpm_level_count; i++) {
478 if (per_cpu(lpm_levels, cpu_to_debug)[i].entered)
479 continue;
480 else {
481 SNPRINTF(stat_buf, count,
482 "\n%d. %s", ++j, per_cpu(lpm_levels,
483 cpu_to_debug)[i].level_name);
484 }
485 }
486 SNPRINTF(stat_buf, count, "\n\nSTATS:");
487 for (i = 0; i < lpm_level_count; i++) {
488 min_ns = per_cpu(lpm_levels, cpu_to_debug)[i].min_time;
489 min_ms = do_div(min_ns, NSEC_PER_MSEC);
490 max_ns = per_cpu(lpm_levels, cpu_to_debug)[i].max_time;
491 max_ms = do_div(max_ns, NSEC_PER_MSEC);
492 avg_ns = per_cpu(lpm_levels, cpu_to_debug)[i].avg_time;
493 avg_ms = do_div(avg_ns, NSEC_PER_MSEC);
494 SNPRINTF(stat_buf, count, "\nLEVEL: %s\n"
495 "Entered : %lld\n"
496 "Early wakeup : %lld\n"
497 "Min Time (mSec): %lld.%06u\n"
498 "Max Time (mSec): %lld.%06u\n"
499 "Avg Time (mSec): %lld.%06u\n",
500 per_cpu(lpm_levels, cpu_to_debug)[i].level_name,
501 per_cpu(lpm_levels, cpu_to_debug)[i].count,
502 per_cpu(lpm_levels, cpu_to_debug)[i].exit_early,
503 min_ns, min_ms,
504 max_ns, max_ms,
505 avg_ns, avg_ms);
506 }
507 } else {
508 for (i = 0; i < lpm_level_count; i++) {
509 SNPRINTF(stat_buf, count, "\nLEVEL: %s\n"
510 "Min Timer value (uSec): %lu\n"
511 "Kernel sleep time (uSec): %u\n",
512 per_cpu(lpm_levels, cpu_to_debug)[i].level_name,
513 per_cpu(lpm_levels, cpu_to_debug)[i].
514 min_threshold,
515 per_cpu(lpm_levels,
516 cpu_to_debug)[i].kernel_sleep_time);
517 }
518 }
519
520 ret = simple_read_from_buffer(user_buffer, buffer_length - count,
521 position, stat_buf_start, stat_buf_size);
522
523 mutex_unlock(&lpm_stats_mutex);
524 kfree(stat_buf_start);
525 return ret;
526}
527
528static ssize_t lpm_test_stat_write(struct file *fp, const char __user
529 *user_buffer, size_t count, loff_t *position)
530{
531 char buf[sizeof(LPM_STATS_RESET)];
532 int ret;
533 int i;
534 struct lpm_level_stat *stats;
535
536 if (count > sizeof(LPM_STATS_RESET)) {
537 ret = -EINVAL;
538 goto write_debug_failed;
539 }
540
541 simple_write_to_buffer(buf, sizeof(LPM_STATS_RESET), position,
542 user_buffer, count);
543
544 if (memcmp(buf, LPM_STATS_RESET, sizeof(LPM_STATS_RESET) - 1)) {
545 ret = -EINVAL;
546 goto write_debug_failed;
547 }
548
549 mutex_lock(&lpm_stats_mutex);
550 stats = per_cpu(lpm_levels, cpu_to_debug);
551 for (i = 0 ; i < lpm_level_count; i++) {
552 stats[i].entered = 0;
553 stats[i].min_time = 0;
554 stats[i].max_time = 0;
555 stats[i].avg_time = 0;
556 stats[i].count = 0;
557 stats[i].exit_early = 0;
558 stats[i].min_threshold = 0;
559 stats[i].kernel_sleep_time = 0;
560 }
561 mutex_unlock(&lpm_stats_mutex);
562 return count;
563write_debug_failed:
564 return ret;
565}
566
567static void lpm_init_rpm_levels(int test_lpm_level_count,
568 struct msm_rpmrs_level *test_levels)
569{
570 int i = 0;
571 unsigned int m_cpu = 0;
572 struct lpm_level_stat *stat_levels = NULL;
573
574 if (test_lpm_level_count < 0)
575 return;
576
577 lpm_level_count = test_lpm_level_count;
578
579 lpm_supp_level = test_levels;
580 for_each_possible_cpu(m_cpu) {
581 stat_levels = kzalloc(sizeof(struct lpm_level_stat) *
582 lpm_level_count, GFP_KERNEL);
583 if (!stat_levels) {
584 for (i = m_cpu - 1; i >= 0; i--)
585 kfree(per_cpu(lpm_levels, i));
586 return;
587 }
588
589 for (i = 0; i < lpm_level_count; i++)
590 lpm_populate_name(&stat_levels[i], &lpm_supp_level[i]);
591
592 per_cpu(lpm_levels, m_cpu) = stat_levels;
593 }
594}
595
596static const struct file_operations fops_stat = {
597 .read = lpm_test_stat_read,
598 .write = lpm_test_stat_write,
599};
600
601static const struct file_operations fops_comm = {
602 .read = lpm_test_comm_read,
603 .write = lpm_test_comm_write,
604};
605
606static int __devinit lpm_test_init(int test_lpm_level_count,
607 struct msm_rpmrs_level *test_levels)
608{
609 int filevalue;
610 int lpm_comm;
611 int ret = -EINVAL;
612 struct dentry *parent_dir = NULL;
613
614 parent_dir = debugfs_create_dir("msm_lpm_debug", NULL);
615 if (!parent_dir) {
616 pr_err("%s: debugfs directory creation failed\n",
617 __func__);
618 goto init_err;
619 }
620
621 lpm_stat = debugfs_create_file("stat",
622 S_IRUGO | S_IWUSR | S_IWGRP, parent_dir,
623 &filevalue, &fops_stat);
624 if (!lpm_stat) {
625 pr_err("%s: lpm_stats debugfs creation failed\n",
626 __func__);
627 goto init_err;
628 }
629
630 lpm_ext_comm = debugfs_create_file("comm",
631 S_IRUGO | S_IWUSR | S_IWGRP, parent_dir, &lpm_comm,
632 &fops_comm);
633 if (!lpm_ext_comm) {
634 pr_err("%s: lpm_comm debugfs creation failed\n",
635 __func__);
636 debugfs_remove(lpm_stat);
637 goto init_err;
638 }
639
640 /*Query RPM resources and allocate the data sturctures*/
641 lpm_init_rpm_levels(test_lpm_level_count, test_levels);
642 ret = 0;
643
644init_err:
645 return ret;
646}
647
/*
 * Platform-driver remove: free the per-cpu statistics arrays and remove
 * the debugfs files.
 *
 * NOTE(review): lpm_supp_level aliases pdata->msm_lpm_test_levels (see
 * lpm_test_probe), so this kfree assumes the platform data levels were
 * heap-allocated -- confirm against the board file.  The parent
 * "msm_lpm_debug" directory is not removed here; its dentry was local to
 * lpm_test_init.
 */
static int __devexit lpm_test_exit(struct platform_device *pdev)
{
	unsigned int m_cpu = 0;

	kfree(lpm_supp_level);
	for_each_possible_cpu(m_cpu)
		kfree(per_cpu(lpm_levels, m_cpu));
	debugfs_remove(lpm_stat);
	debugfs_remove(lpm_ext_comm);
	return 0;
}
659
660static int __devinit lpm_test_probe(struct platform_device *pdev)
661{
662 struct device *dev = &pdev->dev;
663 struct lpm_test_platform_data *pdata;
664 struct msm_rpmrs_level *test_levels;
665 int test_lpm_level_count;
666
667 pdata = pdev->dev.platform_data;
668
669 if (!pdata) {
670 dev_err(dev, "no platform data specified\n");
671 return -EINVAL;
672 }
673
674 test_levels = pdata->msm_lpm_test_levels;
675 test_lpm_level_count = pdata->msm_lpm_test_level_count;
676
677 if (pdata->use_qtimer)
678 msm_lpm_use_qtimer = true;
679
680 lpm_test_init(test_lpm_level_count, test_levels);
681
682 return 0;
683}
684
685static struct platform_driver lpm_test_driver = {
686 .probe = lpm_test_probe,
687 .remove = lpm_test_exit,
688 .driver = {
689 .name = "lpm_test",
690 .owner = THIS_MODULE,
691 },
692};
693
/* Register the lpm_test platform driver; run as a late initcall so the
 * platform device and debugfs are available first. */
static int __init lpm_test_platform_driver_init(void)
{
	return platform_driver_register(&lpm_test_driver);
}
698
699late_initcall(lpm_test_platform_driver_init);