/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "mpd %s: " fmt, __func__

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/kobject.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cpu_pm.h>
#include <linux/cpufreq.h>
#include <linux/rq_stats.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <mach/msm_dcvs.h>
#include <mach/msm_dcvs_scm.h>
#define CREATE_TRACE_POINTS
#include <trace/events/mpdcvs_trace.h>

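/*
 * MP decision driver: a per-cpu hrtimer polls the scheduler's run-queue
 * average and hands significant changes to a kthread, which reports them
 * to the TZ DCVS algorithm via SCM. TZ answers with the cpu mask it wants
 * online plus a slack time; a second kthread hotplugs cores to match the
 * mask, and a slack timer notifies TZ if no update arrives in time.
 */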
#define DEFAULT_RQ_AVG_POLL_MS (1)

struct mpd_attrib {
	struct kobj_attribute enabled;
	struct kobj_attribute rq_avg_poll_ms;
	struct kobj_attribute iowait_threshold_pct;

	struct kobj_attribute em_win_size_min_us;
	struct kobj_attribute em_win_size_max_us;
	struct kobj_attribute em_max_util_pct;
	struct kobj_attribute mp_em_rounding_point_min;
	struct kobj_attribute mp_em_rounding_point_max;
	struct kobj_attribute online_util_pct_min;
	struct kobj_attribute online_util_pct_max;
	struct kobj_attribute slack_time_min_us;
	struct kobj_attribute slack_time_max_us;
	struct kobj_attribute hp_up_max_ms;
	struct kobj_attribute hp_up_ms;
	struct kobj_attribute hp_up_count;
	struct kobj_attribute hp_dw_max_ms;
	struct kobj_attribute hp_dw_ms;
	struct kobj_attribute hp_dw_count;
	struct attribute_group attrib_group;
};

struct msm_mpd_scm_data {
	enum msm_dcvs_scm_event event;
	int nr;
};

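/* Global driver state shared by the poll timers, kthreads, and sysfs. */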
struct mpdecision {
	uint32_t enabled;
	atomic_t algo_cpu_mask;
	uint32_t rq_avg_poll_ms;
	uint32_t iowait_threshold_pct;
	ktime_t next_update;
	uint32_t slack_us;
	struct msm_mpd_algo_param mp_param;
	struct mpd_attrib attrib;
	struct mutex lock;
	struct task_struct *task;
	struct task_struct *hptask;
	struct hrtimer slack_timer;
	struct msm_mpd_scm_data data;
	int hpupdate;
	wait_queue_head_t wait_q;
	wait_queue_head_t wait_hpq;
};

struct hp_latency {
	int hp_up_max_ms;
	int hp_up_ms;
	int hp_up_count;
	int hp_dw_max_ms;
	int hp_dw_ms;
	int hp_dw_count;
};

static DEFINE_PER_CPU(struct hrtimer, rq_avg_poll_timer);
static DEFINE_SPINLOCK(rq_avg_lock);

enum {
	MSM_MPD_DEBUG_NOTIFIER = BIT(0),
	MSM_MPD_CORE_STATUS = BIT(1),
	MSM_MPD_SLACK_TIMER = BIT(2),
};

enum {
	HPUPDATE_WAITING = 0,	/* waiting for a cpumask update from TZ */
	HPUPDATE_SCHEDULED = 1,	/* hotplug work has been scheduled */
	HPUPDATE_IN_PROGRESS = 2,	/* we are in the process of hotplugging */
};

static int msm_mpd_enabled = 1;
module_param_named(enabled, msm_mpd_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP);

static struct dentry *debugfs_base;
static struct mpdecision msm_mpd;

static struct hp_latency hp_latencies;

static unsigned long last_nr;
static int num_present_hundreds;
static ktime_t last_down_time;

#define RQ_AVG_INSIGNIFICANT_BITS 3
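/*
 * The run-queue average is reported in hundredths of a task (see
 * num_present_hundreds), so dropping the low 3 bits suppresses changes
 * smaller than 8 hundredths: e.g. 100 >> 3 == 103 >> 3 == 12, so a move
 * from 1.00 to 1.03 runnable tasks is not reported to TZ.
 */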
static bool ok_to_update_tz(int nr, int last_nr)
{
	/*
	 * Suppress unnecessary TZ reports when the run queue hasn't changed
	 * much since the last reported value. The right shift by
	 * RQ_AVG_INSIGNIFICANT_BITS filters out small changes in the run
	 * queue average which won't change the online cpu mask. Do report,
	 * however, when the cpu online count does not match the count
	 * requested by TZ and we are not already hotplugging, as indicated
	 * by HPUPDATE_IN_PROGRESS in msm_mpd.hpupdate.
	 */
	return
	(((nr >> RQ_AVG_INSIGNIFICANT_BITS)
				!= (last_nr >> RQ_AVG_INSIGNIFICANT_BITS))
	|| ((hweight32(atomic_read(&msm_mpd.algo_cpu_mask))
				!= num_online_cpus())
		&& (msm_mpd.hpupdate != HPUPDATE_IN_PROGRESS)));
}

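/*
 * Per-cpu poll timer: every rq_avg_poll_ms, sample the scheduler's
 * run-queue average, hold it at the previous value instead of reporting a
 * drop while iowait is at or above the configured threshold, clamp it to
 * 100 * num_present_cpus(), and wake the SCM update thread if the change
 * is worth reporting.
 */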
static enum hrtimer_restart msm_mpd_rq_avg_poll_timer(struct hrtimer *timer)
{
	int nr, nr_iowait;
	ktime_t curr_time = ktime_get();
	unsigned long flags;
	int cpu = smp_processor_id();
	enum hrtimer_restart restart = HRTIMER_RESTART;

	spin_lock_irqsave(&rq_avg_lock, flags);
	/* If running on the wrong cpu, don't restart */
	if (&per_cpu(rq_avg_poll_timer, cpu) != timer)
		restart = HRTIMER_NORESTART;

	if (ktime_to_ns(ktime_sub(curr_time, msm_mpd.next_update)) < 0)
		goto out;

	msm_mpd.next_update = ktime_add_ns(curr_time,
			(msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));

	sched_get_nr_running_avg(&nr, &nr_iowait);

	if ((nr_iowait >= msm_mpd.iowait_threshold_pct) && (nr < last_nr))
		nr = last_nr;

	if (nr > num_present_hundreds)
		nr = num_present_hundreds;

	trace_msm_mp_runq("nr_running", nr);

	if (ok_to_update_tz(nr, last_nr)) {
		hrtimer_try_to_cancel(&msm_mpd.slack_timer);
		msm_mpd.data.nr = nr;
		msm_mpd.data.event = MSM_DCVS_SCM_RUNQ_UPDATE;
		wake_up(&msm_mpd.wait_q);
		last_nr = nr;
	}

out:
	/* set next expiration */
	hrtimer_set_expires(timer, msm_mpd.next_update);
	spin_unlock_irqrestore(&rq_avg_lock, flags);
	return restart;
}

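/*
 * Online a core, record the hotplug latency, and report the online event
 * (with the time it took, in microseconds) to the TZ DCVS algorithm.
 */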
static void bring_up_cpu(int cpu)
{
	int cpu_action_time_ms;
	int time_taken_ms;
	int ret, ret1, ret2;

	cpu_action_time_ms = ktime_to_ms(ktime_get());
	ret = cpu_up(cpu);
	if (ret) {
		pr_debug("Error %d onlining core %d\n", ret, cpu);
	} else {
		time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
		if (time_taken_ms > hp_latencies.hp_up_max_ms)
			hp_latencies.hp_up_max_ms = time_taken_ms;
		hp_latencies.hp_up_ms += time_taken_ms;
		hp_latencies.hp_up_count++;
		ret = msm_dcvs_scm_event(
				CPU_OFFSET + cpu,
				MSM_DCVS_SCM_CORE_ONLINE,
				cpufreq_get(cpu),
				(uint32_t) time_taken_ms * USEC_PER_MSEC,
				&ret1, &ret2);
		if (ret)
			pr_err("Error sending hotplug scm event err=%d\n", ret);
	}
}

static void bring_down_cpu(int cpu)
{
	int cpu_action_time_ms;
	int time_taken_ms;
	int ret, ret1, ret2;

	BUG_ON(cpu == 0);
	cpu_action_time_ms = ktime_to_ms(ktime_get());
	ret = cpu_down(cpu);
	if (ret) {
		pr_debug("Error %d offlining core %d\n", ret, cpu);
	} else {
		time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
		if (time_taken_ms > hp_latencies.hp_dw_max_ms)
			hp_latencies.hp_dw_max_ms = time_taken_ms;
		hp_latencies.hp_dw_ms += time_taken_ms;
		hp_latencies.hp_dw_count++;
		ret = msm_dcvs_scm_event(
				CPU_OFFSET + cpu,
				MSM_DCVS_SCM_CORE_OFFLINE,
				(uint32_t) time_taken_ms * USEC_PER_MSEC,
				0,
				&ret1, &ret2);
		if (ret)
			pr_err("Error sending hotplug scm event err=%d\n", ret);
	}
}

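/*
 * Forward a run-queue/QoS event to TZ. TZ replies with the cpu mask it
 * wants online and a slack time; stash both, kick the hotplug thread,
 * and (re)arm the slack timer.
 */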
static int __ref msm_mpd_update_scm(enum msm_dcvs_scm_event event, int nr)
{
	int ret = 0;
	uint32_t req_cpu_mask = 0;
	uint32_t slack_us = 0;
	uint32_t param0 = 0;

	if (event == MSM_DCVS_SCM_RUNQ_UPDATE)
		param0 = nr;

	ret = msm_dcvs_scm_event(0, event, param0, 0,
				&req_cpu_mask, &slack_us);

	if (ret) {
		pr_err("Error (%d) sending event %d, param %d\n", ret, event,
				param0);
		return ret;
	}

	trace_msm_mp_cpusonline("cpu_online_mp", req_cpu_mask);
	trace_msm_mp_slacktime("slack_time_mp", slack_us);
	msm_mpd.slack_us = slack_us;
	atomic_set(&msm_mpd.algo_cpu_mask, req_cpu_mask);
	msm_mpd.hpupdate = HPUPDATE_SCHEDULED;
	wake_up(&msm_mpd.wait_hpq);

	/* Start MP Decision slack timer */
	if (slack_us) {
		hrtimer_cancel(&msm_mpd.slack_timer);
		ret = hrtimer_start(&msm_mpd.slack_timer,
				ktime_set(0, slack_us * NSEC_PER_USEC),
				HRTIMER_MODE_REL_PINNED);
		if (ret)
			pr_err("Failed to register slack timer (%d) %d\n",
					slack_us, ret);
	}

	return ret;
}

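/*
 * Slack timer expiry: no run-queue update reached TZ within the slack
 * window, so wake the update thread with a QOS_TIMER_EXPIRED event
 * (unless a runq update is already pending).
 */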
static enum hrtimer_restart msm_mpd_slack_timer(struct hrtimer *timer)
{
	unsigned long flags;

	trace_printk("mpd:slack_timer_fired!\n");

	spin_lock_irqsave(&rq_avg_lock, flags);
	if (msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE)
		goto out;

	msm_mpd.data.nr = 0;
	msm_mpd.data.event = MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED;
	wake_up(&msm_mpd.wait_q);
out:
	spin_unlock_irqrestore(&rq_avg_lock, flags);
	return HRTIMER_NORESTART;
}

static int msm_mpd_idle_notifier(struct notifier_block *self,
				 unsigned long cmd, void *v)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	switch (cmd) {
	case CPU_PM_EXIT:
		spin_lock_irqsave(&rq_avg_lock, flags);
		hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
			      msm_mpd.next_update,
			      HRTIMER_MODE_ABS_PINNED);
		spin_unlock_irqrestore(&rq_avg_lock, flags);
		break;
	case CPU_PM_ENTER:
		hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static int msm_mpd_hotplug_notifier(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	unsigned long flags;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		spin_lock_irqsave(&rq_avg_lock, flags);
		hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
			      msm_mpd.next_update,
			      HRTIMER_MODE_ABS_PINNED);
		spin_unlock_irqrestore(&rq_avg_lock, flags);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block msm_mpd_idle_nb = {
	.notifier_call = msm_mpd_idle_notifier,
};

static struct notifier_block msm_mpd_hotplug_nb = {
	.notifier_call = msm_mpd_hotplug_notifier,
};

static int __cpuinit msm_mpd_do_hotplug(void *data)
{
	int *event = (int *)data;
	int cpu;

	while (1) {
		wait_event(msm_mpd.wait_hpq, *event || kthread_should_stop());
		if (kthread_should_stop())
			break;

		msm_mpd.hpupdate = HPUPDATE_IN_PROGRESS;
		/*
		 * Bring online any offline cores, then offline any online
		 * cores. Whenever a core is off/onlined restart the
		 * procedure in case a new core is desired to be brought
		 * online in the meantime.
		 */
restart:
		for_each_possible_cpu(cpu) {
			if ((atomic_read(&msm_mpd.algo_cpu_mask) & (1 << cpu))
				&& !cpu_online(cpu)) {
				bring_up_cpu(cpu);
				if (cpu_online(cpu))
					goto restart;
			}
		}

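		/*
		 * Rate-limit core removals: take at most one core down per
		 * 100 ms, and re-evaluate the mask on the next wakeup.
		 */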
		if (ktime_to_ns(ktime_sub(ktime_get(), last_down_time)) >
		    100 * NSEC_PER_MSEC)
			for_each_possible_cpu(cpu)
				if (!(atomic_read(&msm_mpd.algo_cpu_mask) &
				      (1 << cpu)) && cpu_online(cpu)) {
					bring_down_cpu(cpu);
					last_down_time = ktime_get();
					break;
				}
		msm_mpd.hpupdate = HPUPDATE_WAITING;
		msm_dcvs_apply_gpu_floor(0);
	}

	return 0;
}

static int msm_mpd_do_update_scm(void *data)
{
	struct msm_mpd_scm_data *scm_data = (struct msm_mpd_scm_data *)data;
	unsigned long flags;
	enum msm_dcvs_scm_event event;
	int nr;

	while (1) {
		wait_event(msm_mpd.wait_q,
			msm_mpd.data.event == MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED
			|| msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE
			|| kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&rq_avg_lock, flags);
		event = scm_data->event;
		nr = scm_data->nr;
		scm_data->event = 0;
		scm_data->nr = 0;
		spin_unlock_irqrestore(&rq_avg_lock, flags);

		msm_mpd_update_scm(event, nr);
	}
	return 0;
}

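/*
 * Enable: push the algorithm parameters to TZ, tell TZ to start MP
 * decision, spawn the SCM-update and hotplug kthreads, and start the
 * per-cpu poll timers. Disable reverses the process.
 */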
static int __ref msm_mpd_set_enabled(uint32_t enable)
{
	int ret = 0;
	int ret0 = 0;
	int ret1 = 0;
	int cpu;
	static uint32_t last_enable;

	enable = (enable > 0) ? 1 : 0;
	if (last_enable == enable)
		return ret;

	if (enable) {
		ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param);
		if (ret) {
			pr_err("Error(%d): msm_mpd_scm_set_algo_params failed\n",
				ret);
			return ret;
		}
	}

	ret = msm_dcvs_scm_event(0, MSM_DCVS_SCM_MPD_ENABLE, enable, 0,
				&ret0, &ret1);
	if (ret) {
		pr_err("Error(%d) %s MP Decision\n",
				ret, (enable ? "enabling" : "disabling"));
	} else {
		last_enable = enable;
		last_nr = 0;
	}
	if (enable) {
		msm_mpd.next_update = ktime_add_ns(ktime_get(),
				(msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));
		msm_mpd.task = kthread_run(msm_mpd_do_update_scm,
					   &msm_mpd.data, "msm_mpdecision");
		if (IS_ERR(msm_mpd.task))
			return PTR_ERR(msm_mpd.task);

		msm_mpd.hptask = kthread_run(msm_mpd_do_hotplug,
					     &msm_mpd.hpupdate, "msm_hp");
		if (IS_ERR(msm_mpd.hptask)) {
			kthread_stop(msm_mpd.task);
			return PTR_ERR(msm_mpd.hptask);
		}

		for_each_online_cpu(cpu)
			hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
				      msm_mpd.next_update,
				      HRTIMER_MODE_ABS_PINNED);
		cpu_pm_register_notifier(&msm_mpd_idle_nb);
		register_cpu_notifier(&msm_mpd_hotplug_nb);
		msm_mpd.enabled = 1;
	} else {
		for_each_online_cpu(cpu)
			hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
		kthread_stop(msm_mpd.hptask);
		kthread_stop(msm_mpd.task);
		cpu_pm_unregister_notifier(&msm_mpd_idle_nb);
		unregister_cpu_notifier(&msm_mpd_hotplug_nb);
		msm_mpd.enabled = 0;
	}

	return ret;
}

static int msm_mpd_set_rq_avg_poll_ms(uint32_t val)
{
	/*
	 * No need to do anything. Just let the timer pick up the new poll
	 * interval when it next fires.
	 */
	msm_mpd.rq_avg_poll_ms = val;
	return 0;
}

static int msm_mpd_set_iowait_threshold_pct(uint32_t val)
{
	/*
	 * No need to do anything. The timer picks up the new threshold on
	 * its next expiry.
	 */
	msm_mpd.iowait_threshold_pct = val;
	return 0;
}

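/*
 * MPD_ALGO_PARAM generates a sysfs show/store pair for one field of the
 * TZ algorithm parameter block: store parses the value, pushes the whole
 * block to TZ via msm_mpd_scm_set_algo_params(), and rolls the field back
 * if TZ rejects it. MPD_PARAM does the same for driver-local parameters,
 * routing the new value through the matching msm_mpd_set_<name>() helper.
 */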
#define MPD_ALGO_PARAM(_name, _param) \
static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
			struct kobj_attribute *attr, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
} \
static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
		struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
	int ret = 0; \
	uint32_t val; \
	uint32_t old_val; \
	mutex_lock(&msm_mpd.lock); \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) { \
		pr_err("Invalid input %s for %s %d\n", \
				buf, __stringify(_name), ret); \
		mutex_unlock(&msm_mpd.lock); \
		return ret; \
	} \
	old_val = _param; \
	_param = val; \
	ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param); \
	if (ret) { \
		pr_err("Error %d returned when setting algo param %s to %d\n",\
				ret, __stringify(_name), val); \
		_param = old_val; \
	} \
	mutex_unlock(&msm_mpd.lock); \
	return count; \
}

#define MPD_PARAM(_name, _param) \
static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
			struct kobj_attribute *attr, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
} \
static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
		struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
	int ret = 0; \
	uint32_t val; \
	uint32_t old_val; \
	mutex_lock(&msm_mpd.lock); \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) { \
		pr_err("Invalid input %s for %s %d\n", \
				buf, __stringify(_name), ret); \
		mutex_unlock(&msm_mpd.lock); \
		return ret; \
	} \
	old_val = _param; \
	ret = msm_mpd_set_##_name(val); \
	if (ret) { \
		pr_err("Error %d returned when setting param %s to %d\n", \
				ret, __stringify(_name), val); \
		_param = old_val; \
	} \
	mutex_unlock(&msm_mpd.lock); \
	return count; \
}

#define MPD_RW_ATTRIB(i, _name) \
	msm_mpd.attrib._name.attr.name = __stringify(_name); \
	msm_mpd.attrib._name.attr.mode = S_IRUGO | S_IWUSR; \
	msm_mpd.attrib._name.show = msm_mpd_attr_##_name##_show; \
	msm_mpd.attrib._name.store = msm_mpd_attr_##_name##_store; \
	msm_mpd.attrib.attrib_group.attrs[i] = &msm_mpd.attrib._name.attr;

MPD_PARAM(enabled, msm_mpd.enabled);
MPD_PARAM(rq_avg_poll_ms, msm_mpd.rq_avg_poll_ms);
MPD_PARAM(iowait_threshold_pct, msm_mpd.iowait_threshold_pct);
MPD_ALGO_PARAM(em_win_size_min_us, msm_mpd.mp_param.em_win_size_min_us);
MPD_ALGO_PARAM(em_win_size_max_us, msm_mpd.mp_param.em_win_size_max_us);
MPD_ALGO_PARAM(em_max_util_pct, msm_mpd.mp_param.em_max_util_pct);
MPD_ALGO_PARAM(mp_em_rounding_point_min,
				msm_mpd.mp_param.mp_em_rounding_point_min);
MPD_ALGO_PARAM(mp_em_rounding_point_max,
				msm_mpd.mp_param.mp_em_rounding_point_max);
MPD_ALGO_PARAM(online_util_pct_min, msm_mpd.mp_param.online_util_pct_min);
MPD_ALGO_PARAM(online_util_pct_max, msm_mpd.mp_param.online_util_pct_max);
MPD_ALGO_PARAM(slack_time_min_us, msm_mpd.mp_param.slack_time_min_us);
MPD_ALGO_PARAM(slack_time_max_us, msm_mpd.mp_param.slack_time_max_us);
MPD_ALGO_PARAM(hp_up_max_ms, hp_latencies.hp_up_max_ms);
MPD_ALGO_PARAM(hp_up_ms, hp_latencies.hp_up_ms);
MPD_ALGO_PARAM(hp_up_count, hp_latencies.hp_up_count);
MPD_ALGO_PARAM(hp_dw_max_ms, hp_latencies.hp_dw_max_ms);
MPD_ALGO_PARAM(hp_dw_ms, hp_latencies.hp_dw_ms);
MPD_ALGO_PARAM(hp_dw_count, hp_latencies.hp_dw_count);

static int __devinit msm_mpd_probe(struct platform_device *pdev)
{
	struct kobject *module_kobj = NULL;
	int ret = 0;
	const int attr_count = 19;
	struct msm_mpd_algo_param *param = NULL;

	param = pdev->dev.platform_data;
	if (!param) {
		pr_err("Missing platform data\n");
		return -ENODEV;
	}

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
		ret = -ENOENT;
		goto done;
	}

	msm_mpd.attrib.attrib_group.attrs =
		kzalloc(attr_count * sizeof(struct attribute *), GFP_KERNEL);
	if (!msm_mpd.attrib.attrib_group.attrs) {
		ret = -ENOMEM;
		goto done;
	}

	MPD_RW_ATTRIB(0, enabled);
	MPD_RW_ATTRIB(1, rq_avg_poll_ms);
	MPD_RW_ATTRIB(2, iowait_threshold_pct);
	MPD_RW_ATTRIB(3, em_win_size_min_us);
	MPD_RW_ATTRIB(4, em_win_size_max_us);
	MPD_RW_ATTRIB(5, em_max_util_pct);
	MPD_RW_ATTRIB(6, mp_em_rounding_point_min);
	MPD_RW_ATTRIB(7, mp_em_rounding_point_max);
	MPD_RW_ATTRIB(8, online_util_pct_min);
	MPD_RW_ATTRIB(9, online_util_pct_max);
	MPD_RW_ATTRIB(10, slack_time_min_us);
	MPD_RW_ATTRIB(11, slack_time_max_us);
	MPD_RW_ATTRIB(12, hp_up_max_ms);
	MPD_RW_ATTRIB(13, hp_up_ms);
	MPD_RW_ATTRIB(14, hp_up_count);
	MPD_RW_ATTRIB(15, hp_dw_max_ms);
	MPD_RW_ATTRIB(16, hp_dw_ms);
	MPD_RW_ATTRIB(17, hp_dw_count);

	msm_mpd.attrib.attrib_group.attrs[18] = NULL;
	ret = sysfs_create_group(module_kobj, &msm_mpd.attrib.attrib_group);
	if (ret)
		pr_err("Unable to create sysfs objects: %d\n", ret);

	msm_mpd.rq_avg_poll_ms = DEFAULT_RQ_AVG_POLL_MS;

	memcpy(&msm_mpd.mp_param, param, sizeof(struct msm_mpd_algo_param));

	debugfs_base = debugfs_create_dir("msm_mpdecision", NULL);
	if (!debugfs_base) {
		pr_err("Cannot create debugfs base msm_mpdecision\n");
		ret = -ENOENT;
		goto done;
	}

done:
	if (ret && debugfs_base)
		debugfs_remove(debugfs_base);

	return ret;
}

static int __devexit msm_mpd_remove(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver msm_mpd_driver = {
	.probe = msm_mpd_probe,
	.remove = __devexit_p(msm_mpd_remove),
	.driver = {
		.name = "msm_mpdecision",
		.owner = THIS_MODULE,
	},
};

static int __init msm_mpdecision_init(void)
{
	int cpu;

	if (!msm_mpd_enabled) {
		pr_info("Not enabled\n");
		return 0;
	}

	num_present_hundreds = 100 * num_present_cpus();

	hrtimer_init(&msm_mpd.slack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	msm_mpd.slack_timer.function = msm_mpd_slack_timer;

	for_each_possible_cpu(cpu) {
		hrtimer_init(&per_cpu(rq_avg_poll_timer, cpu),
			     CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
		per_cpu(rq_avg_poll_timer, cpu).function
			= msm_mpd_rq_avg_poll_timer;
	}
	mutex_init(&msm_mpd.lock);
	init_waitqueue_head(&msm_mpd.wait_q);
	init_waitqueue_head(&msm_mpd.wait_hpq);
	return platform_driver_register(&msm_mpd_driver);
}
late_initcall(msm_mpdecision_init);