/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "mpd %s: " fmt, __func__

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/kobject.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cpu_pm.h>
#include <linux/cpufreq.h>
#include <linux/rq_stats.h>
#include <asm/atomic.h>
#include <asm/page.h>
#include <mach/msm_dcvs.h>
#include <mach/msm_dcvs_scm.h>
#define CREATE_TRACE_POINTS
#include <trace/events/mpdcvs_trace.h>

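/*
 * Default period, in milliseconds, between run queue average samples taken
 * by the per-cpu poll timer below.
 */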
#define DEFAULT_RQ_AVG_POLL_MS (1)

struct mpd_attrib {
	struct kobj_attribute enabled;
	struct kobj_attribute rq_avg_poll_ms;
	struct kobj_attribute iowait_threshold_pct;

	struct kobj_attribute em_win_size_min_us;
	struct kobj_attribute em_win_size_max_us;
	struct kobj_attribute em_max_util_pct;
	struct kobj_attribute mp_em_rounding_point_min;
	struct kobj_attribute mp_em_rounding_point_max;
	struct kobj_attribute online_util_pct_min;
	struct kobj_attribute online_util_pct_max;
	struct kobj_attribute slack_time_min_us;
	struct kobj_attribute slack_time_max_us;
	struct kobj_attribute hp_up_max_ms;
	struct kobj_attribute hp_up_ms;
	struct kobj_attribute hp_up_count;
	struct kobj_attribute hp_dw_max_ms;
	struct kobj_attribute hp_dw_ms;
	struct kobj_attribute hp_dw_count;
	struct attribute_group attrib_group;
};

struct msm_mpd_scm_data {
	enum msm_dcvs_scm_event event;
	int nr;
};

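/*
 * Top-level driver state: sysfs-tunable parameters, the kthreads that talk
 * to TZ and do the hotplugging, the slack timer that bounds the time between
 * TZ updates, and the event data shared with the timer callbacks.
 */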
struct mpdecision {
	uint32_t enabled;
	atomic_t algo_cpu_mask;
	uint32_t rq_avg_poll_ms;
	uint32_t iowait_threshold_pct;
	ktime_t next_update;
	uint32_t slack_us;
	struct msm_mpd_algo_param mp_param;
	struct mpd_attrib attrib;
	struct mutex lock;
	struct task_struct *task;
	struct task_struct *hptask;
	struct hrtimer slack_timer;
	struct msm_mpd_scm_data data;
	int hpupdate;
	wait_queue_head_t wait_q;
	wait_queue_head_t wait_hpq;
};

struct hp_latency {
	int hp_up_max_ms;
	int hp_up_ms;
	int hp_up_count;
	int hp_dw_max_ms;
	int hp_dw_ms;
	int hp_dw_count;
};

static DEFINE_PER_CPU(struct hrtimer, rq_avg_poll_timer);
static DEFINE_SPINLOCK(rq_avg_lock);

enum {
	MSM_MPD_DEBUG_NOTIFIER = BIT(0),
	MSM_MPD_CORE_STATUS = BIT(1),
	MSM_MPD_SLACK_TIMER = BIT(2),
};

enum {
	HPUPDATE_WAITING = 0,		/* waiting for a cpumask update */
	HPUPDATE_SCHEDULED = 1,		/* a hotplug pass has been requested */
	HPUPDATE_IN_PROGRESS = 2,	/* we are in the process of hotplugging */
};

static int msm_mpd_enabled = 1;
module_param_named(enabled, msm_mpd_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP);

static struct dentry *debugfs_base;
static struct mpdecision msm_mpd;

static struct hp_latency hp_latencies;

static unsigned long last_nr;
static int num_present_hundreds;

#define RQ_AVG_INSIGNIFICANT_BITS 3
static bool ok_to_update_tz(int nr, int last_nr)
{
	/*
	 * Suppress unnecessary TZ reports when the run queue average has not
	 * changed much since the last reported value. The right shift by
	 * RQ_AVG_INSIGNIFICANT_BITS filters out small changes in the run
	 * queue average that would not cause an online cpu mask change.
	 * Report anyway if the online cpu count does not match the count
	 * requested by TZ, unless a hotplug pass is already under way
	 * (msm_mpd.hpupdate == HPUPDATE_IN_PROGRESS).
	 */
	return
	(((nr >> RQ_AVG_INSIGNIFICANT_BITS)
		!= (last_nr >> RQ_AVG_INSIGNIFICANT_BITS))
	|| ((hweight32(atomic_read(&msm_mpd.algo_cpu_mask))
		!= num_online_cpus())
	&& (msm_mpd.hpupdate != HPUPDATE_IN_PROGRESS)));
}

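/*
 * Per-cpu poll timer callback: sample the run queue average, clamp it to
 * 100 * num_present_cpus(), and wake the SCM update thread whenever the
 * value has changed enough for TZ to care (see ok_to_update_tz()).
 */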
static enum hrtimer_restart msm_mpd_rq_avg_poll_timer(struct hrtimer *timer)
{
	int nr, nr_iowait;
	ktime_t curr_time = ktime_get();
	unsigned long flags;
	int cpu = smp_processor_id();
	enum hrtimer_restart restart = HRTIMER_RESTART;

	spin_lock_irqsave(&rq_avg_lock, flags);
	/* If running on the wrong cpu, don't restart */
	if (&per_cpu(rq_avg_poll_timer, cpu) != timer)
		restart = HRTIMER_NORESTART;

	if (ktime_to_ns(ktime_sub(curr_time, msm_mpd.next_update)) < 0)
		goto out;

	msm_mpd.next_update = ktime_add_ns(curr_time,
			(msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));

	sched_get_nr_running_avg(&nr, &nr_iowait);

	if ((nr_iowait >= msm_mpd.iowait_threshold_pct) && (nr < last_nr))
		nr = last_nr;

	if (nr > num_present_hundreds)
		nr = num_present_hundreds;

	trace_msm_mp_runq("nr_running", nr);

	if (ok_to_update_tz(nr, last_nr)) {
		hrtimer_try_to_cancel(&msm_mpd.slack_timer);
		msm_mpd.data.nr = nr;
		msm_mpd.data.event = MSM_DCVS_SCM_RUNQ_UPDATE;
		wake_up(&msm_mpd.wait_q);
		last_nr = nr;
	}

out:
	/* Set the next expiration even when this invocation was skipped */
	hrtimer_set_expires(timer, msm_mpd.next_update);
	spin_unlock_irqrestore(&rq_avg_lock, flags);
	return restart;
}

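/*
 * Online a core on TZ's behalf, account the hotplug latency, and report
 * the core's current frequency and online time back via SCM.
 */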
static void bring_up_cpu(int cpu)
{
	int cpu_action_time_ms;
	int time_taken_ms;
	int ret, ret1, ret2;

	cpu_action_time_ms = ktime_to_ms(ktime_get());
	ret = cpu_up(cpu);
	if (ret) {
		pr_debug("Error %d onlining core %d\n", ret, cpu);
	} else {
		time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
		if (time_taken_ms > hp_latencies.hp_up_max_ms)
			hp_latencies.hp_up_max_ms = time_taken_ms;
		hp_latencies.hp_up_ms += time_taken_ms;
		hp_latencies.hp_up_count++;
		ret = msm_dcvs_scm_event(
				CPU_OFFSET + cpu,
				MSM_DCVS_SCM_CORE_ONLINE,
				cpufreq_get(cpu),
				(uint32_t) time_taken_ms * USEC_PER_MSEC,
				&ret1, &ret2);
		if (ret)
			pr_err("Error sending hotplug scm event err=%d\n", ret);
	}
}

static void bring_down_cpu(int cpu)
{
	int cpu_action_time_ms;
	int time_taken_ms;
	int ret, ret1, ret2;

	BUG_ON(cpu == 0);
	cpu_action_time_ms = ktime_to_ms(ktime_get());
	ret = cpu_down(cpu);
	if (ret) {
		pr_debug("Error %d offlining core %d\n", ret, cpu);
	} else {
		time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
		if (time_taken_ms > hp_latencies.hp_dw_max_ms)
			hp_latencies.hp_dw_max_ms = time_taken_ms;
		hp_latencies.hp_dw_ms += time_taken_ms;
		hp_latencies.hp_dw_count++;
		ret = msm_dcvs_scm_event(
				CPU_OFFSET + cpu,
				MSM_DCVS_SCM_CORE_OFFLINE,
				(uint32_t) time_taken_ms * USEC_PER_MSEC,
				0,
				&ret1, &ret2);
		if (ret)
			pr_err("Error sending hotplug scm event err=%d\n", ret);
	}
}

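/*
 * Send a run queue update or QoS timer expiry to TZ. TZ replies with the
 * cpu mask it wants online and a slack period; publish the mask, wake the
 * hotplug thread, and arm the slack timer as the deadline for the next
 * update.
 */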
static int __ref msm_mpd_update_scm(enum msm_dcvs_scm_event event, int nr)
{
	int ret = 0;
	uint32_t req_cpu_mask = 0;
	uint32_t slack_us = 0;
	uint32_t param0 = 0;

	if (event == MSM_DCVS_SCM_RUNQ_UPDATE)
		param0 = nr;

	ret = msm_dcvs_scm_event(0, event, param0, 0,
				&req_cpu_mask, &slack_us);

	if (ret) {
		pr_err("Error (%d) sending event %d, param %d\n", ret, event,
				param0);
		return ret;
	}

	trace_msm_mp_cpusonline("cpu_online_mp", req_cpu_mask);
	trace_msm_mp_slacktime("slack_time_mp", slack_us);
	msm_mpd.slack_us = slack_us;
	atomic_set(&msm_mpd.algo_cpu_mask, req_cpu_mask);
	msm_mpd.hpupdate = HPUPDATE_SCHEDULED;
	wake_up(&msm_mpd.wait_hpq);

	/* Start MP Decision slack timer */
	if (slack_us) {
		hrtimer_cancel(&msm_mpd.slack_timer);
		ret = hrtimer_start(&msm_mpd.slack_timer,
				ktime_set(0, slack_us * NSEC_PER_USEC),
				HRTIMER_MODE_REL_PINNED);
		if (ret)
			pr_err("Failed to register slack timer (%d) %d\n",
					slack_us, ret);
	}

	return ret;
}

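/*
 * Slack timer expiry: no run queue update reached TZ within the slack
 * period it asked for, so post a QOS_TIMER_EXPIRED event and wake the SCM
 * update thread.
 */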
static enum hrtimer_restart msm_mpd_slack_timer(struct hrtimer *timer)
{
	unsigned long flags;

	trace_printk("mpd:slack_timer_fired!\n");

	spin_lock_irqsave(&rq_avg_lock, flags);
	if (msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE)
		goto out;

	msm_mpd.data.nr = 0;
	msm_mpd.data.event = MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED;
	wake_up(&msm_mpd.wait_q);
out:
	spin_unlock_irqrestore(&rq_avg_lock, flags);
	return HRTIMER_NORESTART;
}

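/*
 * Idle PM notifier: the pinned poll timer cannot tick through power
 * collapse, so cancel it on CPU_PM_ENTER and re-arm it for the shared
 * next_update deadline on CPU_PM_EXIT.
 */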
static int msm_mpd_idle_notifier(struct notifier_block *self,
				 unsigned long cmd, void *v)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	switch (cmd) {
	case CPU_PM_EXIT:
		spin_lock_irqsave(&rq_avg_lock, flags);
		hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
			      msm_mpd.next_update,
			      HRTIMER_MODE_ABS_PINNED);
		spin_unlock_irqrestore(&rq_avg_lock, flags);
		break;
	case CPU_PM_ENTER:
		hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

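/*
 * Hotplug notifier: a newly onlined cpu starts with its poll timer idle,
 * so arm it at CPU_STARTING to bring the cpu into the sampling rotation.
 */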
static int msm_mpd_hotplug_notifier(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	unsigned long flags;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		spin_lock_irqsave(&rq_avg_lock, flags);
		hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
			      msm_mpd.next_update,
			      HRTIMER_MODE_ABS_PINNED);
		spin_unlock_irqrestore(&rq_avg_lock, flags);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block msm_mpd_idle_nb = {
	.notifier_call = msm_mpd_idle_notifier,
};

static struct notifier_block msm_mpd_hotplug_nb = {
	.notifier_call = msm_mpd_hotplug_notifier,
};

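/*
 * Hotplug worker: woken whenever msm_mpd_update_scm() publishes a new
 * algo_cpu_mask; onlines then offlines cores until the online set matches
 * the mask, rescanning from scratch after every successful transition.
 */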
static int __cpuinit msm_mpd_do_hotplug(void *data)
{
	int *event = (int *)data;
	int cpu;

	while (1) {
		wait_event(msm_mpd.wait_hpq, *event || kthread_should_stop());
		if (kthread_should_stop())
			break;

		msm_mpd.hpupdate = HPUPDATE_IN_PROGRESS;
		/*
		 * Bring online any offline cores, then offline any online
		 * cores. Whenever a core is off/onlined restart the procedure
		 * in case a new core is desired to be brought online in the
		 * meantime.
		 */
restart:
		for_each_possible_cpu(cpu) {
			if ((atomic_read(&msm_mpd.algo_cpu_mask) & (1 << cpu))
				&& !cpu_online(cpu)) {
				bring_up_cpu(cpu);
				if (cpu_online(cpu))
					goto restart;
			}
		}

		for_each_possible_cpu(cpu) {
			if (!(atomic_read(&msm_mpd.algo_cpu_mask) & (1 << cpu))
				&& cpu_online(cpu)) {
				bring_down_cpu(cpu);
				if (!cpu_online(cpu))
					goto restart;
			}
		}
		msm_mpd.hpupdate = HPUPDATE_WAITING;
	}

	return 0;
}

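/*
 * SCM update worker: waits for an event posted by the poll or slack timer,
 * snapshots and clears it under rq_avg_lock, then makes the (sleepable)
 * SCM call from thread context.
 */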
static int msm_mpd_do_update_scm(void *data)
{
	struct msm_mpd_scm_data *scm_data = (struct msm_mpd_scm_data *)data;
	unsigned long flags;
	enum msm_dcvs_scm_event event;
	int nr;

	while (1) {
		wait_event(msm_mpd.wait_q,
			msm_mpd.data.event == MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED
			|| msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE
			|| kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&rq_avg_lock, flags);
		event = scm_data->event;
		nr = scm_data->nr;
		scm_data->event = 0;
		scm_data->nr = 0;
		spin_unlock_irqrestore(&rq_avg_lock, flags);

		msm_mpd_update_scm(event, nr);
	}
	return 0;
}

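/*
 * Enable/disable MP decision: push the algo parameters to TZ, notify TZ of
 * the state change, then start or stop the worker threads, per-cpu poll
 * timers and PM/hotplug notifiers.
 */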
static int __ref msm_mpd_set_enabled(uint32_t enable)
{
	int ret = 0;
	int ret0 = 0;
	int ret1 = 0;
	int cpu;
	static uint32_t last_enable;

	enable = (enable > 0) ? 1 : 0;
	if (last_enable == enable)
		return ret;

	if (enable) {
		ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param);
		if (ret) {
			pr_err("Error(%d): msm_mpd_scm_set_algo_params failed\n",
				ret);
			return ret;
		}
	}

	ret = msm_dcvs_scm_event(0, MSM_DCVS_SCM_MPD_ENABLE, enable, 0,
				&ret0, &ret1);
	if (ret) {
		pr_err("Error(%d) %s MP Decision\n",
				ret, (enable ? "enabling" : "disabling"));
	} else {
		last_enable = enable;
		last_nr = 0;
	}
	if (enable) {
		msm_mpd.next_update = ktime_add_ns(ktime_get(),
				(msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));
		msm_mpd.task = kthread_run(msm_mpd_do_update_scm,
				&msm_mpd.data, "msm_mpdecision");
		if (IS_ERR(msm_mpd.task))
			return PTR_ERR(msm_mpd.task);

		msm_mpd.hptask = kthread_run(msm_mpd_do_hotplug,
				&msm_mpd.hpupdate, "msm_hp");
		if (IS_ERR(msm_mpd.hptask)) {
			kthread_stop(msm_mpd.task);
			return PTR_ERR(msm_mpd.hptask);
		}

		for_each_online_cpu(cpu)
			hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
				      msm_mpd.next_update,
				      HRTIMER_MODE_ABS_PINNED);
		cpu_pm_register_notifier(&msm_mpd_idle_nb);
		register_cpu_notifier(&msm_mpd_hotplug_nb);
		msm_mpd.enabled = 1;
	} else {
		for_each_online_cpu(cpu)
			hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
		kthread_stop(msm_mpd.hptask);
		kthread_stop(msm_mpd.task);
		cpu_pm_unregister_notifier(&msm_mpd_idle_nb);
		unregister_cpu_notifier(&msm_mpd_hotplug_nb);
		msm_mpd.enabled = 0;
	}

	return ret;
}

static int msm_mpd_set_rq_avg_poll_ms(uint32_t val)
{
	/*
	 * No need to do anything. Just let the timer set its own next poll
	 * interval when it next fires.
	 */
	msm_mpd.rq_avg_poll_ms = val;
	return 0;
}

static int msm_mpd_set_iowait_threshold_pct(uint32_t val)
{
	/*
	 * No need to do anything. The new threshold is picked up the next
	 * time the poll timer fires.
	 */
	msm_mpd.iowait_threshold_pct = val;
	return 0;
}

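/*
 * Sysfs accessor generators: MPD_ALGO_PARAM stores a tunable and re-sends
 * the whole algo parameter block to TZ; MPD_PARAM instead routes the store
 * through the matching msm_mpd_set_<name>() helper.
 */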
#define MPD_ALGO_PARAM(_name, _param) \
static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
			struct kobj_attribute *attr, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
} \
static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
		struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
	int ret = 0; \
	uint32_t val; \
	uint32_t old_val; \
	mutex_lock(&msm_mpd.lock); \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) { \
		pr_err("Invalid input %s for %s %d\n", \
				buf, __stringify(_name), ret);\
		mutex_unlock(&msm_mpd.lock); \
		return ret; \
	} \
	old_val = _param; \
	_param = val; \
	ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param); \
	if (ret) { \
		pr_err("Error %d returned when setting algo param %s to %d\n",\
				ret, __stringify(_name), val); \
		_param = old_val; \
	} \
	mutex_unlock(&msm_mpd.lock); \
	return count; \
}

#define MPD_PARAM(_name, _param) \
static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
			struct kobj_attribute *attr, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
} \
static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
		struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
	int ret = 0; \
	uint32_t val; \
	uint32_t old_val; \
	mutex_lock(&msm_mpd.lock); \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) { \
		pr_err("Invalid input %s for %s %d\n", \
				buf, __stringify(_name), ret);\
		mutex_unlock(&msm_mpd.lock); \
		return ret; \
	} \
	old_val = _param; \
	ret = msm_mpd_set_##_name(val); \
	if (ret) { \
		pr_err("Error %d returned when setting algo param %s to %d\n",\
				ret, __stringify(_name), val); \
		_param = old_val; \
	} \
	mutex_unlock(&msm_mpd.lock); \
	return count; \
}

#define MPD_RW_ATTRIB(i, _name) \
	msm_mpd.attrib._name.attr.name = __stringify(_name); \
	msm_mpd.attrib._name.attr.mode = S_IRUGO | S_IWUSR; \
	msm_mpd.attrib._name.show = msm_mpd_attr_##_name##_show; \
	msm_mpd.attrib._name.store = msm_mpd_attr_##_name##_store; \
	msm_mpd.attrib.attrib_group.attrs[i] = &msm_mpd.attrib._name.attr;

MPD_PARAM(enabled, msm_mpd.enabled);
MPD_PARAM(rq_avg_poll_ms, msm_mpd.rq_avg_poll_ms);
MPD_PARAM(iowait_threshold_pct, msm_mpd.iowait_threshold_pct);
MPD_ALGO_PARAM(em_win_size_min_us, msm_mpd.mp_param.em_win_size_min_us);
MPD_ALGO_PARAM(em_win_size_max_us, msm_mpd.mp_param.em_win_size_max_us);
MPD_ALGO_PARAM(em_max_util_pct, msm_mpd.mp_param.em_max_util_pct);
MPD_ALGO_PARAM(mp_em_rounding_point_min,
				msm_mpd.mp_param.mp_em_rounding_point_min);
MPD_ALGO_PARAM(mp_em_rounding_point_max,
				msm_mpd.mp_param.mp_em_rounding_point_max);
MPD_ALGO_PARAM(online_util_pct_min, msm_mpd.mp_param.online_util_pct_min);
MPD_ALGO_PARAM(online_util_pct_max, msm_mpd.mp_param.online_util_pct_max);
MPD_ALGO_PARAM(slack_time_min_us, msm_mpd.mp_param.slack_time_min_us);
MPD_ALGO_PARAM(slack_time_max_us, msm_mpd.mp_param.slack_time_max_us);
MPD_ALGO_PARAM(hp_up_max_ms, hp_latencies.hp_up_max_ms);
MPD_ALGO_PARAM(hp_up_ms, hp_latencies.hp_up_ms);
MPD_ALGO_PARAM(hp_up_count, hp_latencies.hp_up_count);
MPD_ALGO_PARAM(hp_dw_max_ms, hp_latencies.hp_dw_max_ms);
MPD_ALGO_PARAM(hp_dw_ms, hp_latencies.hp_dw_ms);
MPD_ALGO_PARAM(hp_dw_count, hp_latencies.hp_dw_count);

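/*
 * Probe: expose the tunables under the module's kobject, take the algo
 * parameter defaults from platform data, and create the msm_mpdecision
 * debugfs directory.
 */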
static int __devinit msm_mpd_probe(struct platform_device *pdev)
{
	struct kobject *module_kobj = NULL;
	int ret = 0;
	const int attr_count = 19;
	struct msm_mpd_algo_param *param = NULL;

	param = pdev->dev.platform_data;
	if (!param) {
		pr_err("No algo parameters supplied\n");
		return -EINVAL;
	}

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
		ret = -ENOENT;
		goto done;
	}

	msm_mpd.attrib.attrib_group.attrs =
		kzalloc(attr_count * sizeof(struct attribute *), GFP_KERNEL);
	if (!msm_mpd.attrib.attrib_group.attrs) {
		ret = -ENOMEM;
		goto done;
	}

	MPD_RW_ATTRIB(0, enabled);
	MPD_RW_ATTRIB(1, rq_avg_poll_ms);
	MPD_RW_ATTRIB(2, iowait_threshold_pct);
	MPD_RW_ATTRIB(3, em_win_size_min_us);
	MPD_RW_ATTRIB(4, em_win_size_max_us);
	MPD_RW_ATTRIB(5, em_max_util_pct);
	MPD_RW_ATTRIB(6, mp_em_rounding_point_min);
	MPD_RW_ATTRIB(7, mp_em_rounding_point_max);
	MPD_RW_ATTRIB(8, online_util_pct_min);
	MPD_RW_ATTRIB(9, online_util_pct_max);
	MPD_RW_ATTRIB(10, slack_time_min_us);
	MPD_RW_ATTRIB(11, slack_time_max_us);
	MPD_RW_ATTRIB(12, hp_up_max_ms);
	MPD_RW_ATTRIB(13, hp_up_ms);
	MPD_RW_ATTRIB(14, hp_up_count);
	MPD_RW_ATTRIB(15, hp_dw_max_ms);
	MPD_RW_ATTRIB(16, hp_dw_ms);
	MPD_RW_ATTRIB(17, hp_dw_count);

	msm_mpd.attrib.attrib_group.attrs[18] = NULL;
	ret = sysfs_create_group(module_kobj, &msm_mpd.attrib.attrib_group);
	if (ret)
		pr_err("Unable to create sysfs objects :%d\n", ret);

	msm_mpd.rq_avg_poll_ms = DEFAULT_RQ_AVG_POLL_MS;

	memcpy(&msm_mpd.mp_param, param, sizeof(struct msm_mpd_algo_param));

	debugfs_base = debugfs_create_dir("msm_mpdecision", NULL);
	if (!debugfs_base) {
		pr_err("Cannot create debugfs base msm_mpdecision\n");
		ret = -ENOENT;
		goto done;
	}

done:
	if (ret && debugfs_base)
		debugfs_remove(debugfs_base);

	return ret;
}

static int __devexit msm_mpd_remove(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver msm_mpd_driver = {
	.probe = msm_mpd_probe,
	.remove = __devexit_p(msm_mpd_remove),
	.driver = {
		.name = "msm_mpdecision",
		.owner = THIS_MODULE,
	},
};

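/*
 * Late init: initialize the slack and per-cpu poll timers, locks and wait
 * queues, then register the platform driver. Nothing runs until the driver
 * is enabled through sysfs.
 */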
static int __init msm_mpdecision_init(void)
{
	int cpu;

	if (!msm_mpd_enabled) {
		pr_info("Not enabled\n");
		return 0;
	}

	num_present_hundreds = 100 * num_present_cpus();

	hrtimer_init(&msm_mpd.slack_timer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL_PINNED);
	msm_mpd.slack_timer.function = msm_mpd_slack_timer;

	for_each_possible_cpu(cpu) {
		hrtimer_init(&per_cpu(rq_avg_poll_timer, cpu),
				CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
		per_cpu(rq_avg_poll_timer, cpu).function
			= msm_mpd_rq_avg_poll_timer;
	}
	mutex_init(&msm_mpd.lock);
	init_waitqueue_head(&msm_mpd.wait_q);
	init_waitqueue_head(&msm_mpd.wait_hpq);
	return platform_driver_register(&msm_mpd_driver);
}
late_initcall(msm_mpdecision_init);