/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "mpd %s: " fmt, __func__

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/kobject.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cpu_pm.h>
#include <linux/cpufreq.h>
#include <linux/rq_stats.h>
#include <asm/atomic.h>
#include <asm/page.h>
#include <mach/msm_dcvs.h>
#include <mach/msm_dcvs_scm.h>
#define CREATE_TRACE_POINTS
#include <trace/events/mpdcvs_trace.h>

#define DEFAULT_RQ_AVG_POLL_MS (1)
#define DEFAULT_RQ_AVG_DIVIDE (25)

struct mpd_attrib {
	struct kobj_attribute enabled;
	struct kobj_attribute rq_avg_poll_ms;
	struct kobj_attribute iowait_threshold_pct;
	struct kobj_attribute rq_avg_divide;
	struct kobj_attribute em_win_size_min_us;
	struct kobj_attribute em_win_size_max_us;
	struct kobj_attribute em_max_util_pct;
	struct kobj_attribute mp_em_rounding_point_min;
	struct kobj_attribute mp_em_rounding_point_max;
	struct kobj_attribute online_util_pct_min;
	struct kobj_attribute online_util_pct_max;
	struct kobj_attribute slack_time_min_us;
	struct kobj_attribute slack_time_max_us;
	struct kobj_attribute hp_up_max_ms;
	struct kobj_attribute hp_up_ms;
	struct kobj_attribute hp_up_count;
	struct kobj_attribute hp_dw_max_ms;
	struct kobj_attribute hp_dw_ms;
	struct kobj_attribute hp_dw_count;
	struct attribute_group attrib_group;
};

struct msm_mpd_scm_data {
	enum msm_dcvs_scm_event event;
	int nr;
};

struct mpdecision {
	uint32_t enabled;
	atomic_t algo_cpu_mask;
	uint32_t rq_avg_poll_ms;
	uint32_t iowait_threshold_pct;
	uint32_t rq_avg_divide;
	ktime_t next_update;
	uint32_t slack_us;
	struct msm_mpd_algo_param mp_param;
	struct mpd_attrib attrib;
	struct mutex lock;
	struct task_struct *task;
	struct task_struct *hptask;
	struct hrtimer slack_timer;
	struct msm_mpd_scm_data data;
	int hpupdate;
	wait_queue_head_t wait_q;
	wait_queue_head_t wait_hpq;
};

struct hp_latency {
	int hp_up_max_ms;
	int hp_up_ms;
	int hp_up_count;
	int hp_dw_max_ms;
	int hp_dw_ms;
	int hp_dw_count;
};

static DEFINE_PER_CPU(struct hrtimer, rq_avg_poll_timer);
static DEFINE_SPINLOCK(rq_avg_lock);

enum {
	MSM_MPD_DEBUG_NOTIFIER = BIT(0),
	MSM_MPD_CORE_STATUS = BIT(1),
	MSM_MPD_SLACK_TIMER = BIT(2),
};

enum {
	HPUPDATE_WAITING = 0,	  /* waiting for a cpumask update from TZ */
	HPUPDATE_SCHEDULED = 1,	  /* a hotplug update has been scheduled */
	HPUPDATE_IN_PROGRESS = 2, /* hotplugging is in progress */
};

static int msm_mpd_enabled = 1;
module_param_named(enabled, msm_mpd_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP);

static struct dentry *debugfs_base;
static struct mpdecision msm_mpd;

static struct hp_latency hp_latencies;

static unsigned long last_nr;
static int num_present_hundreds;
static ktime_t last_down_time;

static bool ok_to_update_tz(int nr, int last_nr)
{
	/*
	 * Filter out unnecessary TZ reports. Only report if the run queue
	 * average, divided by rq_avg_divide to ignore small fluctuations
	 * that cannot change the online cpu mask, differs from the last
	 * reported value, or if the online cpu count does not match the
	 * count requested by TZ and we are not already hotplugging, as
	 * indicated by HPUPDATE_IN_PROGRESS in msm_mpd.hpupdate.
	 */
	return
	(((nr / msm_mpd.rq_avg_divide)
				!= (last_nr / msm_mpd.rq_avg_divide))
	|| ((hweight32(atomic_read(&msm_mpd.algo_cpu_mask))
				!= num_online_cpus())
		&& (msm_mpd.hpupdate != HPUPDATE_IN_PROGRESS)));
}

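/*
 * Per-cpu poll timer callback: samples the scheduler's run queue average
 * every rq_avg_poll_ms and, when ok_to_update_tz() says the change is
 * significant, cancels the slack timer and wakes the SCM update thread
 * with an MSM_DCVS_SCM_RUNQ_UPDATE event.
 */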
static enum hrtimer_restart msm_mpd_rq_avg_poll_timer(struct hrtimer *timer)
{
	int nr, nr_iowait;
	ktime_t curr_time = ktime_get();
	unsigned long flags;
	int cpu = smp_processor_id();
	enum hrtimer_restart restart = HRTIMER_RESTART;

	spin_lock_irqsave(&rq_avg_lock, flags);
	/* If running on the wrong cpu, don't restart */
	if (&per_cpu(rq_avg_poll_timer, cpu) != timer)
		restart = HRTIMER_NORESTART;

	if (ktime_to_ns(ktime_sub(curr_time, msm_mpd.next_update)) < 0)
		goto out;

	msm_mpd.next_update = ktime_add_ns(curr_time,
			(msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));

	sched_get_nr_running_avg(&nr, &nr_iowait);

	if ((nr_iowait >= msm_mpd.iowait_threshold_pct) && (nr < last_nr))
		nr = last_nr;

	if (nr > num_present_hundreds)
		nr = num_present_hundreds;

	trace_msm_mp_runq("nr_running", nr);

	if (ok_to_update_tz(nr, last_nr)) {
		hrtimer_try_to_cancel(&msm_mpd.slack_timer);
		msm_mpd.data.nr = nr;
		msm_mpd.data.event = MSM_DCVS_SCM_RUNQ_UPDATE;
		wake_up(&msm_mpd.wait_q);
		last_nr = nr;
	}

out:
	/* set next expiration */
	hrtimer_set_expires(timer, msm_mpd.next_update);
	spin_unlock_irqrestore(&rq_avg_lock, flags);
	return restart;
}

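/*
 * bring_up_cpu()/bring_down_cpu() online or offline a core, track how long
 * the hotplug operation took in hp_latencies, and report the new core state
 * (and the latency, in usec) to the TZ DCVS algorithm via
 * msm_dcvs_scm_event().
 */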
static void bring_up_cpu(int cpu)
{
	int cpu_action_time_ms;
	int time_taken_ms;
	int ret, ret1, ret2;

	cpu_action_time_ms = ktime_to_ms(ktime_get());
	ret = cpu_up(cpu);
	if (ret) {
		pr_debug("Error %d onlining core %d\n", ret, cpu);
	} else {
		time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
		if (time_taken_ms > hp_latencies.hp_up_max_ms)
			hp_latencies.hp_up_max_ms = time_taken_ms;
		hp_latencies.hp_up_ms += time_taken_ms;
		hp_latencies.hp_up_count++;
		ret = msm_dcvs_scm_event(
				CPU_OFFSET + cpu,
				MSM_DCVS_SCM_CORE_ONLINE,
				cpufreq_get(cpu),
				(uint32_t) time_taken_ms * USEC_PER_MSEC,
				&ret1, &ret2);
		if (ret)
			pr_err("Error sending hotplug scm event err=%d\n", ret);
	}
}

static void bring_down_cpu(int cpu)
{
	int cpu_action_time_ms;
	int time_taken_ms;
	int ret, ret1, ret2;

	BUG_ON(cpu == 0);
	cpu_action_time_ms = ktime_to_ms(ktime_get());
	ret = cpu_down(cpu);
	if (ret) {
		pr_debug("Error %d offlining core %d\n", ret, cpu);
	} else {
		time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
		if (time_taken_ms > hp_latencies.hp_dw_max_ms)
			hp_latencies.hp_dw_max_ms = time_taken_ms;
		hp_latencies.hp_dw_ms += time_taken_ms;
		hp_latencies.hp_dw_count++;
		ret = msm_dcvs_scm_event(
				CPU_OFFSET + cpu,
				MSM_DCVS_SCM_CORE_OFFLINE,
				(uint32_t) time_taken_ms * USEC_PER_MSEC,
				0,
				&ret1, &ret2);
		if (ret)
			pr_err("Error sending hotplug scm event err=%d\n", ret);
	}
}

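/*
 * Send a run queue or QoS-timer event to TZ. TZ replies with the cpu mask it
 * wants online and a slack time; publish the mask for the hotplug thread,
 * wake it, and re-arm the slack timer if a non-zero slack time was returned.
 */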
static int __ref msm_mpd_update_scm(enum msm_dcvs_scm_event event, int nr)
{
	int ret = 0;
	uint32_t req_cpu_mask = 0;
	uint32_t slack_us = 0;
	uint32_t param0 = 0;

	if (event == MSM_DCVS_SCM_RUNQ_UPDATE)
		param0 = nr;

	ret = msm_dcvs_scm_event(0, event, param0, 0,
				&req_cpu_mask, &slack_us);

	if (ret) {
		pr_err("Error (%d) sending event %d, param %d\n", ret, event,
								param0);
		return ret;
	}

	trace_msm_mp_cpusonline("cpu_online_mp", req_cpu_mask);
	trace_msm_mp_slacktime("slack_time_mp", slack_us);
	msm_mpd.slack_us = slack_us;
	atomic_set(&msm_mpd.algo_cpu_mask, req_cpu_mask);
	msm_mpd.hpupdate = HPUPDATE_SCHEDULED;
	wake_up(&msm_mpd.wait_hpq);

	/* Start MP Decision slack timer */
	if (slack_us) {
		hrtimer_cancel(&msm_mpd.slack_timer);
		ret = hrtimer_start(&msm_mpd.slack_timer,
				ktime_set(0, slack_us * NSEC_PER_USEC),
				HRTIMER_MODE_REL_PINNED);
		if (ret)
			pr_err("Failed to register slack timer (%d) %d\n",
							slack_us, ret);
	}

	return ret;
}

static enum hrtimer_restart msm_mpd_slack_timer(struct hrtimer *timer)
{
	unsigned long flags;

	trace_printk("mpd:slack_timer_fired!\n");

	spin_lock_irqsave(&rq_avg_lock, flags);
	if (msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE)
		goto out;

	msm_mpd.data.nr = 0;
	msm_mpd.data.event = MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED;
	wake_up(&msm_mpd.wait_q);
out:
	spin_unlock_irqrestore(&rq_avg_lock, flags);
	return HRTIMER_NORESTART;
}

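/*
 * The rq_avg poll timer is pinned to each cpu and stops across idle power
 * collapse and hotplug. Restart it when a core exits idle (CPU_PM_EXIT) or
 * comes online (CPU_STARTING), and cancel it on idle entry (CPU_PM_ENTER).
 */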
static int msm_mpd_idle_notifier(struct notifier_block *self,
				 unsigned long cmd, void *v)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	switch (cmd) {
	case CPU_PM_EXIT:
		spin_lock_irqsave(&rq_avg_lock, flags);
		hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
			      msm_mpd.next_update,
			      HRTIMER_MODE_ABS_PINNED);
		spin_unlock_irqrestore(&rq_avg_lock, flags);
		break;
	case CPU_PM_ENTER:
		hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static int msm_mpd_hotplug_notifier(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (int)(unsigned long)hcpu;
	unsigned long flags;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		spin_lock_irqsave(&rq_avg_lock, flags);
		hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
			      msm_mpd.next_update,
			      HRTIMER_MODE_ABS_PINNED);
		spin_unlock_irqrestore(&rq_avg_lock, flags);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block msm_mpd_idle_nb = {
	.notifier_call = msm_mpd_idle_notifier,
};

static struct notifier_block msm_mpd_hotplug_nb = {
	.notifier_call = msm_mpd_hotplug_notifier,
};

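/*
 * Hotplug worker thread: sleeps until msm_mpd_update_scm() publishes a new
 * algo_cpu_mask, then onlines/offlines cores until the online set matches
 * the mask requested by TZ.
 */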
static int __cpuinit msm_mpd_do_hotplug(void *data)
{
	int *event = (int *)data;
	int cpu;

	while (1) {
		msm_dcvs_update_algo_params();
		wait_event(msm_mpd.wait_hpq, *event || kthread_should_stop());
		if (kthread_should_stop())
			break;

		msm_mpd.hpupdate = HPUPDATE_IN_PROGRESS;
		/*
		 * Bring online any offline cores, then offline any online
		 * cores. Whenever a core is onlined, restart the procedure in
		 * case a new core was meanwhile requested to be brought
		 * online.
		 */
restart:
		for_each_possible_cpu(cpu) {
			if ((atomic_read(&msm_mpd.algo_cpu_mask) & (1 << cpu))
				&& !cpu_online(cpu)) {
				bring_up_cpu(cpu);
				if (cpu_online(cpu))
					goto restart;
			}
		}

		/* Rate-limit offlining to at most one core per 100ms */
		if (ktime_to_ns(ktime_sub(ktime_get(), last_down_time)) >
		    100 * NSEC_PER_MSEC)
			for_each_possible_cpu(cpu)
				if (!(atomic_read(&msm_mpd.algo_cpu_mask) &
				      (1 << cpu)) && cpu_online(cpu)) {
					bring_down_cpu(cpu);
					last_down_time = ktime_get();
					break;
				}
		msm_mpd.hpupdate = HPUPDATE_WAITING;
		msm_dcvs_apply_gpu_floor(0);
	}

	return 0;
}

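/*
 * SCM update worker thread: sleeps until the poll timer or the slack timer
 * posts an event in msm_mpd.data, then forwards it to TZ through
 * msm_mpd_update_scm().
 */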
static int msm_mpd_do_update_scm(void *data)
{
	struct msm_mpd_scm_data *scm_data = (struct msm_mpd_scm_data *)data;
	unsigned long flags;
	enum msm_dcvs_scm_event event;
	int nr;

	while (1) {
		wait_event(msm_mpd.wait_q,
			msm_mpd.data.event == MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED
			|| msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE
			|| kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&rq_avg_lock, flags);
		event = scm_data->event;
		nr = scm_data->nr;
		scm_data->event = 0;
		scm_data->nr = 0;
		spin_unlock_irqrestore(&rq_avg_lock, flags);

		msm_mpd_update_scm(event, nr);
	}
	return 0;
}

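/*
 * Enable or disable MP Decision. Enabling pushes the algo parameters to TZ,
 * starts the SCM update and hotplug threads, arms the per-cpu poll timers
 * and registers the idle/hotplug notifiers; disabling undoes all of that.
 */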
static int __ref msm_mpd_set_enabled(uint32_t enable)
{
	int ret = 0;
	int ret0 = 0;
	int ret1 = 0;
	int cpu;
	static uint32_t last_enable;

	enable = (enable > 0) ? 1 : 0;
	if (last_enable == enable)
		return ret;

	if (enable) {
		ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param);
		if (ret) {
			pr_err("Error(%d): msm_mpd_scm_set_algo_params failed\n",
				ret);
			return ret;
		}
	}

	ret = msm_dcvs_scm_event(0, MSM_DCVS_SCM_MPD_ENABLE, enable, 0,
				&ret0, &ret1);
	if (ret) {
		pr_err("Error(%d) %s MP Decision\n",
				ret, (enable ? "enabling" : "disabling"));
	} else {
		last_enable = enable;
		last_nr = 0;
	}
	if (enable) {
		msm_mpd.next_update = ktime_add_ns(ktime_get(),
				(msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));
		msm_mpd.task = kthread_run(msm_mpd_do_update_scm,
					   &msm_mpd.data, "msm_mpdecision");
		if (IS_ERR(msm_mpd.task))
			return -EFAULT;

		msm_mpd.hptask = kthread_run(msm_mpd_do_hotplug,
					     &msm_mpd.hpupdate, "msm_hp");
		if (IS_ERR(msm_mpd.hptask))
			return -EFAULT;

		for_each_online_cpu(cpu)
			hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
				      msm_mpd.next_update,
				      HRTIMER_MODE_ABS_PINNED);
		cpu_pm_register_notifier(&msm_mpd_idle_nb);
		register_cpu_notifier(&msm_mpd_hotplug_nb);
		msm_mpd.enabled = 1;
	} else {
		for_each_online_cpu(cpu)
			hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
		kthread_stop(msm_mpd.hptask);
		kthread_stop(msm_mpd.task);
		cpu_pm_unregister_notifier(&msm_mpd_idle_nb);
		unregister_cpu_notifier(&msm_mpd_hotplug_nb);
		msm_mpd.enabled = 0;
	}

	return ret;
}

static int msm_mpd_set_rq_avg_poll_ms(uint32_t val)
{
	/*
	 * No need to do anything. Just let the timer set its own next poll
	 * interval when it next fires.
	 */
	msm_mpd.rq_avg_poll_ms = val;
	return 0;
}

static int msm_mpd_set_iowait_threshold_pct(uint32_t val)
{
	/*
	 * No need to do anything. Just let the timer pick up the new value
	 * when it next fires.
	 */
	msm_mpd.iowait_threshold_pct = val;
	return 0;
}

static int msm_mpd_set_rq_avg_divide(uint32_t val)
{
	/*
	 * No need to do anything. The new value will be used the next time
	 * the decision is made as to whether to update tz.
	 */

	if (val == 0)
		return -EINVAL;

	msm_mpd.rq_avg_divide = val;
	return 0;
}

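/*
 * MPD_ALGO_PARAM() and MPD_PARAM() generate the sysfs show/store handlers for
 * the attributes declared in struct mpd_attrib. MPD_ALGO_PARAM() writes the
 * value into its backing field and pushes the whole parameter block to TZ
 * with msm_mpd_scm_set_algo_params(); MPD_PARAM() instead calls the matching
 * msm_mpd_set_<name>() helper. MPD_RW_ATTRIB() wires a handler pair into the
 * attribute group created in msm_mpd_probe().
 */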
#define MPD_ALGO_PARAM(_name, _param) \
static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
			struct kobj_attribute *attr, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
} \
static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
		struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
	int ret = 0; \
	uint32_t val; \
	uint32_t old_val; \
	mutex_lock(&msm_mpd.lock); \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) { \
		pr_err("Invalid input %s for %s %d\n", \
				buf, __stringify(_name), ret);\
		mutex_unlock(&msm_mpd.lock); \
		return ret; \
	} \
	old_val = _param; \
	_param = val; \
	ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param); \
	if (ret) { \
		pr_err("Error %d returned when setting algo param %s to %d\n",\
				ret, __stringify(_name), val); \
		_param = old_val; \
	} \
	mutex_unlock(&msm_mpd.lock); \
	return count; \
}

#define MPD_PARAM(_name, _param) \
static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
			struct kobj_attribute *attr, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
} \
static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
		struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
	int ret = 0; \
	uint32_t val; \
	uint32_t old_val; \
	mutex_lock(&msm_mpd.lock); \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) { \
		pr_err("Invalid input %s for %s %d\n", \
				buf, __stringify(_name), ret);\
		mutex_unlock(&msm_mpd.lock); \
		return ret; \
	} \
	old_val = _param; \
	ret = msm_mpd_set_##_name(val); \
	if (ret) { \
		pr_err("Error %d returned when setting algo param %s to %d\n",\
				ret, __stringify(_name), val); \
		_param = old_val; \
	} \
	mutex_unlock(&msm_mpd.lock); \
	return count; \
}

#define MPD_RW_ATTRIB(i, _name) \
	msm_mpd.attrib._name.attr.name = __stringify(_name); \
	msm_mpd.attrib._name.attr.mode = S_IRUGO | S_IWUSR; \
	msm_mpd.attrib._name.show = msm_mpd_attr_##_name##_show; \
	msm_mpd.attrib._name.store = msm_mpd_attr_##_name##_store; \
	msm_mpd.attrib.attrib_group.attrs[i] = &msm_mpd.attrib._name.attr;

MPD_PARAM(enabled, msm_mpd.enabled);
MPD_PARAM(rq_avg_poll_ms, msm_mpd.rq_avg_poll_ms);
MPD_PARAM(iowait_threshold_pct, msm_mpd.iowait_threshold_pct);
MPD_PARAM(rq_avg_divide, msm_mpd.rq_avg_divide);
MPD_ALGO_PARAM(em_win_size_min_us, msm_mpd.mp_param.em_win_size_min_us);
MPD_ALGO_PARAM(em_win_size_max_us, msm_mpd.mp_param.em_win_size_max_us);
MPD_ALGO_PARAM(em_max_util_pct, msm_mpd.mp_param.em_max_util_pct);
MPD_ALGO_PARAM(mp_em_rounding_point_min,
				msm_mpd.mp_param.mp_em_rounding_point_min);
MPD_ALGO_PARAM(mp_em_rounding_point_max,
				msm_mpd.mp_param.mp_em_rounding_point_max);
MPD_ALGO_PARAM(online_util_pct_min, msm_mpd.mp_param.online_util_pct_min);
MPD_ALGO_PARAM(online_util_pct_max, msm_mpd.mp_param.online_util_pct_max);
MPD_ALGO_PARAM(slack_time_min_us, msm_mpd.mp_param.slack_time_min_us);
MPD_ALGO_PARAM(slack_time_max_us, msm_mpd.mp_param.slack_time_max_us);
MPD_ALGO_PARAM(hp_up_max_ms, hp_latencies.hp_up_max_ms);
MPD_ALGO_PARAM(hp_up_ms, hp_latencies.hp_up_ms);
MPD_ALGO_PARAM(hp_up_count, hp_latencies.hp_up_count);
MPD_ALGO_PARAM(hp_dw_max_ms, hp_latencies.hp_dw_max_ms);
MPD_ALGO_PARAM(hp_dw_ms, hp_latencies.hp_dw_ms);
MPD_ALGO_PARAM(hp_dw_count, hp_latencies.hp_dw_count);

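/*
 * Probe: build the attribute group and expose it on this module's kobject,
 * copy the board's msm_mpd_algo_param platform data, and create a debugfs
 * directory. A rough usage sketch, assuming KBUILD_MODNAME resolves to
 * "msm_mpdecision" (the exact sysfs paths depend on how the file is built):
 *
 *   # gate the driver at boot (module_param "enabled")
 *   cat /sys/module/msm_mpdecision/parameters/enabled
 *   # tune the run queue polling interval at runtime (attribute group below)
 *   echo 10 > /sys/module/msm_mpdecision/rq_avg_poll_ms
 */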
static int __devinit msm_mpd_probe(struct platform_device *pdev)
{
	struct kobject *module_kobj = NULL;
	int ret = 0;
	const int attr_count = 20;
	struct msm_mpd_algo_param *param = NULL;

	param = pdev->dev.platform_data;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
		ret = -ENOENT;
		goto done;
	}

	msm_mpd.attrib.attrib_group.attrs =
		kzalloc(attr_count * sizeof(struct attribute *), GFP_KERNEL);
	if (!msm_mpd.attrib.attrib_group.attrs) {
		ret = -ENOMEM;
		goto done;
	}

	MPD_RW_ATTRIB(0, enabled);
	MPD_RW_ATTRIB(1, rq_avg_poll_ms);
	MPD_RW_ATTRIB(2, iowait_threshold_pct);
	MPD_RW_ATTRIB(3, rq_avg_divide);
	MPD_RW_ATTRIB(4, em_win_size_min_us);
	MPD_RW_ATTRIB(5, em_win_size_max_us);
	MPD_RW_ATTRIB(6, em_max_util_pct);
	MPD_RW_ATTRIB(7, mp_em_rounding_point_min);
	MPD_RW_ATTRIB(8, mp_em_rounding_point_max);
	MPD_RW_ATTRIB(9, online_util_pct_min);
	MPD_RW_ATTRIB(10, online_util_pct_max);
	MPD_RW_ATTRIB(11, slack_time_min_us);
	MPD_RW_ATTRIB(12, slack_time_max_us);
	MPD_RW_ATTRIB(13, hp_up_max_ms);
	MPD_RW_ATTRIB(14, hp_up_ms);
	MPD_RW_ATTRIB(15, hp_up_count);
	MPD_RW_ATTRIB(16, hp_dw_max_ms);
	MPD_RW_ATTRIB(17, hp_dw_ms);
	MPD_RW_ATTRIB(18, hp_dw_count);

	msm_mpd.attrib.attrib_group.attrs[19] = NULL;
	ret = sysfs_create_group(module_kobj, &msm_mpd.attrib.attrib_group);
	if (ret)
		pr_err("Unable to create sysfs objects :%d\n", ret);

	msm_mpd.rq_avg_poll_ms = DEFAULT_RQ_AVG_POLL_MS;
	msm_mpd.rq_avg_divide = DEFAULT_RQ_AVG_DIVIDE;

	memcpy(&msm_mpd.mp_param, param, sizeof(struct msm_mpd_algo_param));

	debugfs_base = debugfs_create_dir("msm_mpdecision", NULL);
	if (!debugfs_base) {
		pr_err("Cannot create debugfs base msm_mpdecision\n");
		ret = -ENOENT;
		goto done;
	}

done:
	if (ret && debugfs_base)
		debugfs_remove(debugfs_base);

	return ret;
}

static int __devexit msm_mpd_remove(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver msm_mpd_driver = {
	.probe = msm_mpd_probe,
	.remove = __devexit_p(msm_mpd_remove),
	.driver = {
		.name = "msm_mpdecision",
		.owner = THIS_MODULE,
	},
};

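/*
 * Late init: set up the slack timer and the per-cpu run queue poll timers,
 * then register the platform driver. The "enabled" module parameter can be
 * used to keep MP Decision from initializing at all.
 */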
static int __init msm_mpdecision_init(void)
{
	int cpu;

	if (!msm_mpd_enabled) {
		pr_info("Not enabled\n");
		return 0;
	}

	num_present_hundreds = 100 * num_present_cpus();

	hrtimer_init(&msm_mpd.slack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	msm_mpd.slack_timer.function = msm_mpd_slack_timer;

	for_each_possible_cpu(cpu) {
		hrtimer_init(&per_cpu(rq_avg_poll_timer, cpu),
			     CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
		per_cpu(rq_avg_poll_timer, cpu).function
			= msm_mpd_rq_avg_poll_timer;
	}
	mutex_init(&msm_mpd.lock);
	init_waitqueue_head(&msm_mpd.wait_q);
	init_waitqueue_head(&msm_mpd.wait_hpq);
	return platform_driver_register(&msm_mpd_driver);
}
late_initcall(msm_mpdecision_init);