/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/tick.h>
#include <linux/suspend.h>
#include <linux/pm_qos.h>
#include <linux/of_platform.h>
#include <linux/smp.h>
#include <linux/remote_spinlock.h>
#include <linux/msm_remote_spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/coresight-cti.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/cpu_pm.h>
#include <linux/cpuhotplug.h>
#include <soc/qcom/spm.h>
#include <soc/qcom/pm.h>
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/jtag.h>
#include <soc/qcom/system_pm.h>
#include <asm/cputype.h>
#include <asm/arch_timer.h>
#include <asm/cacheflush.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
#include "lpm-levels.h"
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_low_power.h>

#define SCLK_HZ (32768)
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
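/*
 * Note on the PSCI power_state composition (a sketch, based on how the
 * macros above are used in this file): psci_enter_sleep() ORs the
 * per-level psci_id into the low bits, PSCI_POWER_STATE() places the
 * level's is_reset flag at bit 30, and PSCI_AFFINITY_LEVEL() places the
 * deepest affinity level that is fully in sync in bits [25:24]. The
 * exact layout is defined by the platform firmware, not by this driver.
 */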
59enum {
60 MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
61 MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
62};
63
64enum debug_event {
65 CPU_ENTER,
66 CPU_EXIT,
67 CLUSTER_ENTER,
68 CLUSTER_EXIT,
69 PRE_PC_CB,
70};
71
72struct lpm_debug {
73 cycle_t time;
74 enum debug_event evt;
75 int cpu;
76 uint32_t arg1;
77 uint32_t arg2;
78 uint32_t arg3;
79 uint32_t arg4;
80};
81
82struct lpm_cluster *lpm_root_node;
83
84#define MAXSAMPLES 5
85
86static bool lpm_prediction = true;
87module_param_named(lpm_prediction, lpm_prediction, bool, 0664);
88
89static uint32_t ref_stddev = 100;
90module_param_named(ref_stddev, ref_stddev, uint, 0664);
91
92static uint32_t tmr_add = 100;
93module_param_named(tmr_add, tmr_add, uint, 0664);
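/*
 * Prediction tunables (both in microseconds, matching how they are used
 * below): ref_stddev is the residency standard deviation below which the
 * sample average is trusted as the predicted sleep length, and tmr_add is
 * the extra slack added to a prediction when arming the history timer.
 */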
94
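/*
 * Per-CPU idle history used by the prediction logic (field summary,
 * inferred from its use in this file): resi[] and mode[] record the last
 * MAXSAMPLES residencies (us) and the low power mode chosen for each,
 * hptr is the ring-buffer write index, nsamp the number of valid samples,
 * htmr_wkup flags a wakeup caused by the history timer, hinvalid marks
 * the samples as stale, and stime is the absolute predicted wakeup time.
 */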
95struct lpm_history {
96 uint32_t resi[MAXSAMPLES];
97 int mode[MAXSAMPLES];
98 int nsamp;
99 uint32_t hptr;
100 uint32_t hinvalid;
101 uint32_t htmr_wkup;
102 int64_t stime;
103};
104
105static DEFINE_PER_CPU(struct lpm_history, hist);
106
static DEFINE_PER_CPU(struct lpm_cpu *, cpu_lpm);
static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static struct hrtimer histtimer;
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;

static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time);
118static void cluster_prepare(struct lpm_cluster *cluster,
119 const struct cpumask *cpu, int child_idx, bool from_idle,
120 int64_t time);
121
static bool menu_select;
123module_param_named(menu_select, menu_select, bool, 0664);
124
125static int msm_pm_sleep_time_override;
126module_param_named(sleep_time_override,
127 msm_pm_sleep_time_override, int, 0664);
128static uint64_t suspend_wake_time;
129
130static bool print_parsed_dt;
131module_param_named(print_parsed_dt, print_parsed_dt, bool, 0664);
132
133static bool sleep_disabled;
134module_param_named(sleep_disabled, sleep_disabled, bool, 0664);
135
136s32 msm_cpuidle_get_deep_idle_latency(void)
137{
138 return 10;
139}
140
141void lpm_suspend_wake_time(uint64_t wakeup_time)
142{
143 if (wakeup_time <= 0) {
144 suspend_wake_time = msm_pm_sleep_time_override;
145 return;
146 }
147
148 if (msm_pm_sleep_time_override &&
149 (msm_pm_sleep_time_override < wakeup_time))
150 suspend_wake_time = msm_pm_sleep_time_override;
151 else
152 suspend_wake_time = wakeup_time;
153}
154EXPORT_SYMBOL(lpm_suspend_wake_time);
155
156static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
157 struct latency_level *lat_level)
158{
159 struct list_head *list;
160 struct lpm_cluster_level *level;
161 struct lpm_cluster *n;
162 struct power_params *pwr_params;
163 uint32_t latency = 0;
164 int i;
165
166 if (!cluster->list.next) {
167 for (i = 0; i < cluster->nlevels; i++) {
168 level = &cluster->levels[i];
169 pwr_params = &level->pwr;
170 if (lat_level->reset_level == level->reset_level) {
171 if ((latency > pwr_params->latency_us)
172 || (!latency))
173 latency = pwr_params->latency_us;
174 break;
175 }
176 }
177 } else {
178 list_for_each(list, &cluster->parent->child) {
179 n = list_entry(list, typeof(*n), list);
180 if (lat_level->level_name) {
181 if (strcmp(lat_level->level_name,
182 n->cluster_name))
183 continue;
184 }
185 for (i = 0; i < n->nlevels; i++) {
186 level = &n->levels[i];
187 pwr_params = &level->pwr;
188 if (lat_level->reset_level ==
189 level->reset_level) {
190 if ((latency > pwr_params->latency_us)
191 || (!latency))
192 latency =
193 pwr_params->latency_us;
194 break;
195 }
196 }
197 }
198 }
199 return latency;
200}
201
202static uint32_t least_cpu_latency(struct list_head *child,
203 struct latency_level *lat_level)
204{
205 struct list_head *list;
206 struct lpm_cpu_level *level;
207 struct power_params *pwr_params;
208 struct lpm_cpu *cpu;
209 struct lpm_cluster *n;
	uint32_t lat = 0;
	int i;
212
213 list_for_each(list, child) {
214 n = list_entry(list, typeof(*n), list);
215 if (lat_level->level_name) {
216 if (strcmp(lat_level->level_name, n->cluster_name))
217 continue;
218 }
		list_for_each_entry(cpu, &n->cpu, list) {
			for (i = 0; i < cpu->nlevels; i++) {
				level = &cpu->levels[i];
				pwr_params = &level->pwr;
				if (lat_level->reset_level
						== level->reset_level) {
					if ((lat > pwr_params->latency_us)
							|| (!lat))
						lat = pwr_params->latency_us;
					break;
				}
			}
		}
	}
	return lat;
}
235
static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
	int affinity_level)
{
	struct lpm_cluster *n;

	if ((cluster->aff_level == affinity_level)
		|| ((!list_empty(&cluster->cpu)) && (affinity_level == 0)))
		return cluster;
	else if (list_empty(&cluster->cpu)) {
		n = list_entry(cluster->child.next, typeof(*n), list);
		return cluster_aff_match(n, affinity_level);
	} else
		return NULL;
}
250
251int lpm_get_latency(struct latency_level *level, uint32_t *latency)
252{
253 struct lpm_cluster *cluster;
254 uint32_t val;
255
256 if (!lpm_root_node) {
257 pr_err("%s: lpm_probe not completed\n", __func__);
258 return -EAGAIN;
259 }
260
261 if ((level->affinity_level < 0)
262 || (level->affinity_level > lpm_root_node->aff_level)
263 || (level->reset_level < LPM_RESET_LVL_RET)
264 || (level->reset_level > LPM_RESET_LVL_PC)
265 || !latency)
266 return -EINVAL;
267
268 cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
269 if (!cluster) {
270 pr_err("%s:No matching cluster found for affinity_level:%d\n",
271 __func__, level->affinity_level);
272 return -EINVAL;
273 }
274
275 if (level->affinity_level == 0)
276 val = least_cpu_latency(&cluster->parent->child, level);
277 else
278 val = least_cluster_latency(cluster, level);
279
280 if (!val) {
281 pr_err("%s:No mode with affinity_level:%d reset_level:%d\n",
282 __func__, level->affinity_level, level->reset_level);
283 return -EINVAL;
284 }
285
286 *latency = val;
287
288 return 0;
289}
290EXPORT_SYMBOL(lpm_get_latency);
291
292static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
293 uint32_t arg2, uint32_t arg3, uint32_t arg4)
294{
295 struct lpm_debug *dbg;
296 int idx;
297 static DEFINE_SPINLOCK(debug_lock);
298 static int pc_event_index;
299
300 if (!lpm_debug)
301 return;
302
303 spin_lock(&debug_lock);
304 idx = pc_event_index++;
305 dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
306
307 dbg->evt = event;
308 dbg->time = arch_counter_get_cntvct();
309 dbg->cpu = raw_smp_processor_id();
310 dbg->arg1 = arg1;
311 dbg->arg2 = arg2;
312 dbg->arg3 = arg3;
313 dbg->arg4 = arg4;
314 spin_unlock(&debug_lock);
315}
316
static int lpm_dying_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}

static int lpm_starting_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}
332
333static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
334{
335 return HRTIMER_NORESTART;
336}
337
338static void histtimer_cancel(void)
339{
340 hrtimer_try_to_cancel(&histtimer);
341}
342
343static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
344{
345 int cpu = raw_smp_processor_id();
346 struct lpm_history *history = &per_cpu(hist, cpu);
347
348 history->hinvalid = 1;
349 return HRTIMER_NORESTART;
350}
351
352static void histtimer_start(uint32_t time_us)
353{
354 uint64_t time_ns = time_us * NSEC_PER_USEC;
355 ktime_t hist_ktime = ns_to_ktime(time_ns);
356
357 histtimer.function = histtimer_fn;
358 hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
359}
360
361static void cluster_timer_init(struct lpm_cluster *cluster)
362{
363 struct list_head *list;
364
365 if (!cluster)
366 return;
367
368 hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
369
370 list_for_each(list, &cluster->child) {
371 struct lpm_cluster *n;
372
373 n = list_entry(list, typeof(*n), list);
374 cluster_timer_init(n);
375 }
376}
377
static void clusttimer_cancel(void)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	hrtimer_try_to_cancel(&cluster->histtimer);

	if (cluster->parent)
		hrtimer_try_to_cancel(&cluster->parent->histtimer);
}
388
389static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
390{
391 struct lpm_cluster *cluster = container_of(h,
392 struct lpm_cluster, histtimer);
393
394 cluster->history.hinvalid = 1;
395 return HRTIMER_NORESTART;
396}
397
398static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
399{
400 uint64_t time_ns = time_us * NSEC_PER_USEC;
401 ktime_t clust_ktime = ns_to_ktime(time_ns);
402
403 cluster->histtimer.function = clusttimer_fn;
404 hrtimer_start(&cluster->histtimer, clust_ktime,
405 HRTIMER_MODE_REL_PINNED);
406}
407
408static void msm_pm_set_timer(uint32_t modified_time_us)
409{
410 u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
411 ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
412
413 lpm_hrtimer.function = lpm_hrtimer_cb;
414 hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
415}
416
static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
418 struct lpm_cpu *cpu, int *idx_restrict,
419 uint32_t *idx_restrict_time)
420{
421 int i, j, divisor;
422 uint64_t max, avg, stddev;
423 int64_t thresh = LLONG_MAX;
424 struct lpm_history *history = &per_cpu(hist, dev->cpu);
425 uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
426
427 if (!lpm_prediction)
428 return 0;
429
	/*
	 * Samples are marked invalid when the CPU was woken up by the
	 * history timer, so do not predict in that case.
	 */
434 if (history->hinvalid) {
435 history->hinvalid = 0;
436 history->htmr_wkup = 1;
437 history->stime = 0;
438 return 0;
439 }
440
441 /*
442 * Predict only when all the samples are collected.
443 */
444 if (history->nsamp < MAXSAMPLES) {
445 history->stime = 0;
446 return 0;
447 }
448
	/*
	 * If the samples do not deviate much from each other, use their
	 * average as the predicted sleep time. Otherwise, if any specific
	 * mode has mostly premature exits, report that mode through
	 * idx_restrict so that it and deeper modes are avoided.
	 */
455
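	/*
	 * Worked example of the loop below (illustrative numbers only):
	 * with samples {100, 110, 90, 105, 2000} us, the average is skewed
	 * by the 2000 us outlier and the standard deviation is large, so
	 * the maximum sample is dropped (thresh = 1999) and the remaining
	 * four samples (avg ~101 us, small stddev) yield the prediction.
	 */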
456again:
457 max = avg = divisor = stddev = 0;
458 for (i = 0; i < MAXSAMPLES; i++) {
459 int64_t value = history->resi[i];
460
461 if (value <= thresh) {
462 avg += value;
463 divisor++;
464 if (value > max)
465 max = value;
466 }
467 }
468 do_div(avg, divisor);
469
470 for (i = 0; i < MAXSAMPLES; i++) {
471 int64_t value = history->resi[i];
472
473 if (value <= thresh) {
474 int64_t diff = value - avg;
475
476 stddev += diff * diff;
477 }
478 }
479 do_div(stddev, divisor);
480 stddev = int_sqrt(stddev);
481
	/*
	 * If the deviation is small, return the average; otherwise
	 * drop the maximum sample and retry.
	 */
486 if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
487 || stddev <= ref_stddev) {
488 history->stime = ktime_to_us(ktime_get()) + avg;
489 return avg;
490 } else if (divisor > (MAXSAMPLES - 1)) {
491 thresh = max - 1;
492 goto again;
493 }
494
	/*
	 * Find the number of premature exits for each mode (excluding the
	 * clock-gating mode). If more than fifty percent of the samples
	 * for a mode exited prematurely, restrict that mode and all
	 * deeper modes.
	 */
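	/*
	 * For example (illustrative): if three of the five samples were
	 * taken in mode 2 and each of those residencies was below
	 * min_residency[2], *idx_restrict is set to 2, the average of the
	 * failed residencies becomes *idx_restrict_time, and the caller
	 * will not pick level 2 or deeper this time around.
	 */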
500 if (history->htmr_wkup != 1) {
501 for (j = 1; j < cpu->nlevels; j++) {
502 uint32_t failed = 0;
503 uint64_t total = 0;
504
505 for (i = 0; i < MAXSAMPLES; i++) {
506 if ((history->mode[i] == j) &&
507 (history->resi[i] < min_residency[j])) {
508 failed++;
509 total += history->resi[i];
510 }
511 }
512 if (failed > (MAXSAMPLES/2)) {
513 *idx_restrict = j;
514 do_div(total, failed);
515 *idx_restrict_time = total;
516 history->stime = ktime_to_us(ktime_get())
517 + *idx_restrict_time;
518 break;
519 }
520 }
521 }
522 return 0;
523}
524
525static inline void invalidate_predict_history(struct cpuidle_device *dev)
526{
527 struct lpm_history *history = &per_cpu(hist, dev->cpu);
528
529 if (!lpm_prediction)
530 return;
531
532 if (history->hinvalid) {
533 history->hinvalid = 0;
534 history->htmr_wkup = 1;
535 history->stime = 0;
536 }
537}
538
539static void clear_predict_history(void)
540{
541 struct lpm_history *history;
542 int i;
543 unsigned int cpu;
544
545 if (!lpm_prediction)
546 return;
547
548 for_each_possible_cpu(cpu) {
549 history = &per_cpu(hist, cpu);
550 for (i = 0; i < MAXSAMPLES; i++) {
551 history->resi[i] = 0;
552 history->mode[i] = -1;
553 history->hptr = 0;
554 history->nsamp = 0;
555 history->stime = 0;
556 }
557 }
558}
559
560static void update_history(struct cpuidle_device *dev, int idx);
561
562static int cpu_power_select(struct cpuidle_device *dev,
563 struct lpm_cpu *cpu)
564{
565 int best_level = -1;
566 uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
567 dev->cpu);
568 uint32_t sleep_us =
569 (uint32_t)(ktime_to_us(tick_nohz_get_sleep_length()));
570 uint32_t modified_time_us = 0;
571 uint32_t next_event_us = 0;
572 int i, idx_restrict;
573 uint32_t lvl_latency_us = 0;
574 uint64_t predicted = 0;
575 uint32_t htime = 0, idx_restrict_time = 0;
576 uint32_t next_wakeup_us = sleep_us;
577 uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
578 uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);
579
580 if (!cpu)
581 return -EINVAL;
582
583 if (sleep_disabled)
584 return 0;
585
586 idx_restrict = cpu->nlevels + 1;
587
588 next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
589
590 for (i = 0; i < cpu->nlevels; i++) {
591 struct lpm_cpu_level *level = &cpu->levels[i];
592 struct power_params *pwr_params = &level->pwr;
593 enum msm_pm_sleep_mode mode = level->mode;
594 bool allow;
595
596 allow = lpm_cpu_mode_allow(dev->cpu, i, true);
597
598 if (!allow)
599 continue;
600
601 lvl_latency_us = pwr_params->latency_us;
602
603 if (latency_us < lvl_latency_us)
604 break;
605
606 if (next_event_us) {
607 if (next_event_us < lvl_latency_us)
608 break;
609
610 if (((next_event_us - lvl_latency_us) < sleep_us) ||
611 (next_event_us < sleep_us))
612 next_wakeup_us = next_event_us - lvl_latency_us;
613 }
614
615 if (!i) {
			/*
			 * If next_wakeup_us itself is not long enough for
			 * any low power mode deeper than clock gating, do
			 * not bother calling prediction.
			 */
621 if (next_wakeup_us > max_residency[i]) {
622 predicted = lpm_cpuidle_predict(dev, cpu,
623 &idx_restrict, &idx_restrict_time);
624 if (predicted < min_residency[i])
625 predicted = 0;
626 } else
627 invalidate_predict_history(dev);
628 }
629
630 if (i >= idx_restrict)
631 break;
632
633 best_level = i;
634
635 if (next_event_us && next_event_us < sleep_us &&
636 (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
637 modified_time_us
638 = next_event_us - lvl_latency_us;
639 else
640 modified_time_us = 0;
641
642 if (predicted ? (predicted <= max_residency[i])
643 : (next_wakeup_us <= max_residency[i]))
644 break;
645 }
646
647 if (modified_time_us)
648 msm_pm_set_timer(modified_time_us);
649
	/*
	 * Start a timer to avoid staying in a shallower mode forever
	 * in case of misprediction.
	 */
654 if ((predicted || (idx_restrict != (cpu->nlevels + 1)))
655 && ((best_level >= 0)
656 && (best_level < (cpu->nlevels-1)))) {
657 htime = predicted + tmr_add;
658 if (htime == tmr_add)
659 htime = idx_restrict_time;
660 else if (htime > max_residency[best_level])
661 htime = max_residency[best_level];
662
663 if ((next_wakeup_us > htime) &&
664 ((next_wakeup_us - htime) > max_residency[best_level]))
665 histtimer_start(htime);
666 }
667
668 trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
669
670 trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
671 predicted, htime);
672
673 return best_level;
674}
675
676static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
677 struct cpumask *mask, bool from_idle, uint32_t *pred_time)
678{
679 int cpu;
680 int next_cpu = raw_smp_processor_id();
681 ktime_t next_event;
682 struct cpumask online_cpus_in_cluster;
683 struct lpm_history *history;
684 int64_t prediction = LONG_MAX;
685
686 next_event.tv64 = KTIME_MAX;
687 if (!suspend_wake_time)
688 suspend_wake_time = msm_pm_sleep_time_override;
689 if (!from_idle) {
690 if (mask)
691 cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
692 if (!suspend_wake_time)
693 return ~0ULL;
694 else
695 return USEC_PER_SEC * suspend_wake_time;
696 }
697
698 cpumask_and(&online_cpus_in_cluster,
699 &cluster->num_children_in_sync, cpu_online_mask);
700
701 for_each_cpu(cpu, &online_cpus_in_cluster) {
702 ktime_t *next_event_c;
703
704 next_event_c = get_next_event_cpu(cpu);
705 if (next_event_c->tv64 < next_event.tv64) {
706 next_event.tv64 = next_event_c->tv64;
707 next_cpu = cpu;
708 }
709
710 if (from_idle && lpm_prediction) {
711 history = &per_cpu(hist, cpu);
712 if (history->stime && (history->stime < prediction))
713 prediction = history->stime;
714 }
715 }
716
717 if (mask)
718 cpumask_copy(mask, cpumask_of(next_cpu));
719
720 if (from_idle && lpm_prediction) {
721 if (prediction > ktime_to_us(ktime_get()))
722 *pred_time = prediction - ktime_to_us(ktime_get());
723 }
724
725 if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
726 return ktime_to_us(ktime_sub(next_event, ktime_get()));
727 else
728 return 0;
729}
730
731static int cluster_predict(struct lpm_cluster *cluster,
732 uint32_t *pred_us)
733{
734 int i, j;
735 int ret = 0;
736 struct cluster_history *history = &cluster->history;
737 int64_t cur_time = ktime_to_us(ktime_get());
738
739 if (!lpm_prediction)
740 return 0;
741
742 if (history->hinvalid) {
743 history->hinvalid = 0;
744 history->htmr_wkup = 1;
745 history->flag = 0;
746 return ret;
747 }
748
749 if (history->nsamp == MAXSAMPLES) {
750 for (i = 0; i < MAXSAMPLES; i++) {
751 if ((cur_time - history->stime[i])
752 > CLUST_SMPL_INVLD_TIME)
753 history->nsamp--;
754 }
755 }
756
757 if (history->nsamp < MAXSAMPLES) {
758 history->flag = 0;
759 return ret;
760 }
761
762 if (history->flag == 2)
763 history->flag = 0;
764
765 if (history->htmr_wkup != 1) {
766 uint64_t total = 0;
767
768 if (history->flag == 1) {
769 for (i = 0; i < MAXSAMPLES; i++)
770 total += history->resi[i];
771 do_div(total, MAXSAMPLES);
772 *pred_us = total;
773 return 2;
774 }
775
776 for (j = 1; j < cluster->nlevels; j++) {
777 uint32_t failed = 0;
778
779 total = 0;
780 for (i = 0; i < MAXSAMPLES; i++) {
781 if ((history->mode[i] == j) && (history->resi[i]
782 < cluster->levels[j].pwr.min_residency)) {
783 failed++;
784 total += history->resi[i];
785 }
786 }
787
788 if (failed > (MAXSAMPLES-2)) {
789 do_div(total, failed);
790 *pred_us = total;
791 history->flag = 1;
792 return 1;
793 }
794 }
795 }
796
797 return ret;
798}
799
800static void update_cluster_history_time(struct cluster_history *history,
801 int idx, uint64_t start)
802{
803 history->entry_idx = idx;
804 history->entry_time = start;
805}
806
807static void update_cluster_history(struct cluster_history *history, int idx)
808{
809 uint32_t tmr = 0;
810 uint32_t residency = 0;
811 struct lpm_cluster *cluster =
812 container_of(history, struct lpm_cluster, history);
813
814 if (!lpm_prediction)
815 return;
816
817 if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
818 residency = ktime_to_us(ktime_get()) - history->entry_time;
819 history->stime[history->hptr] = history->entry_time;
820 } else
821 return;
822
823 if (history->htmr_wkup) {
824 if (!history->hptr)
825 history->hptr = MAXSAMPLES-1;
826 else
827 history->hptr--;
828
829 history->resi[history->hptr] += residency;
830
831 history->htmr_wkup = 0;
832 tmr = 1;
833 } else {
834 history->resi[history->hptr] = residency;
835 }
836
837 history->mode[history->hptr] = idx;
838
839 history->entry_idx = INT_MIN;
840 history->entry_time = 0;
841
842 if (history->nsamp < MAXSAMPLES)
843 history->nsamp++;
844
845 trace_cluster_pred_hist(cluster->cluster_name,
846 history->mode[history->hptr], history->resi[history->hptr],
847 history->hptr, tmr);
848
849 (history->hptr)++;
850
851 if (history->hptr >= MAXSAMPLES)
852 history->hptr = 0;
853}
854
855static void clear_cl_history_each(struct cluster_history *history)
856{
857 int i;
858
859 for (i = 0; i < MAXSAMPLES; i++) {
860 history->resi[i] = 0;
861 history->mode[i] = -1;
862 history->stime[i] = 0;
863 }
864 history->hptr = 0;
865 history->nsamp = 0;
866 history->flag = 0;
867 history->hinvalid = 0;
868 history->htmr_wkup = 0;
}

static void clear_cl_predict_history(void)
871{
872 struct lpm_cluster *cluster = lpm_root_node;
873 struct list_head *list;
874
875 if (!lpm_prediction)
876 return;
877
878 clear_cl_history_each(&cluster->history);
879
880 list_for_each(list, &cluster->child) {
881 struct lpm_cluster *n;
882
883 n = list_entry(list, typeof(*n), list);
884 clear_cl_history_each(&n->history);
885 }
886}
887
888static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
889 int *ispred)
890{
891 int best_level = -1;
892 int i;
893 struct cpumask mask;
894 uint32_t latency_us = ~0U;
895 uint32_t sleep_us;
896 uint32_t cpupred_us = 0, pred_us = 0;
897 int pred_mode = 0, predicted = 0;
898
899 if (!cluster)
900 return -EINVAL;
901
902 sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL,
903 from_idle, &cpupred_us);
904
905 if (from_idle) {
906 pred_mode = cluster_predict(cluster, &pred_us);
907
908 if (cpupred_us && pred_mode && (cpupred_us < pred_us))
909 pred_us = cpupred_us;
910
911 if (pred_us && pred_mode && (pred_us < sleep_us))
912 predicted = 1;
913
914 if (predicted && (pred_us == cpupred_us))
915 predicted = 2;
916 }
917
918 if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
919 latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
920 &mask);
921
	/*
	 * If at least one core in the cluster is online, the cluster
	 * low power modes should be determined by the idle
	 * characteristics, even when the last core enters a low power
	 * mode as part of hotplug.
	 */
928
929 if (!from_idle && num_online_cpus() > 1 &&
930 cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
931 from_idle = true;
932
933 for (i = 0; i < cluster->nlevels; i++) {
934 struct lpm_cluster_level *level = &cluster->levels[i];
935 struct power_params *pwr_params = &level->pwr;
936
937 if (!lpm_cluster_mode_allow(cluster, i, from_idle))
938 continue;
939
		if (!cpumask_equal(&cluster->num_children_in_sync,
941 &level->num_cpu_votes))
942 continue;
943
944 if (from_idle && latency_us < pwr_params->latency_us)
945 break;
946
947 if (sleep_us < pwr_params->time_overhead_us)
948 break;
949
950 if (suspend_in_progress && from_idle && level->notify_rpm)
951 continue;
952
		best_level = i;
954
955 if (predicted ? (pred_us <= pwr_params->max_residency)
956 : (sleep_us <= pwr_params->max_residency))
957 break;
958 }
959
960 if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
961 cluster->history.flag = 2;
962
963 *ispred = predicted;
964
965 trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
966 latency_us, predicted, pred_us);
967
968 return best_level;
969}
970
971static void cluster_notify(struct lpm_cluster *cluster,
972 struct lpm_cluster_level *level, bool enter)
973{
974 if (level->is_reset && enter)
975 cpu_cluster_pm_enter(cluster->aff_level);
976 else if (level->is_reset && !enter)
977 cpu_cluster_pm_exit(cluster->aff_level);
978}
979
980static int cluster_configure(struct lpm_cluster *cluster, int idx,
981 bool from_idle, int predicted)
982{
	struct lpm_cluster_level *level = &cluster->levels[idx];

	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
986 || is_IPI_pending(&cluster->num_children_in_sync)) {
987 return -EPERM;
988 }
989
990 if (idx != cluster->default_level) {
991 update_debug_pc_event(CLUSTER_ENTER, idx,
992 cluster->num_children_in_sync.bits[0],
993 cluster->child_cpus.bits[0], from_idle);
994 trace_cluster_enter(cluster->cluster_name, idx,
995 cluster->num_children_in_sync.bits[0],
996 cluster->child_cpus.bits[0], from_idle);
997 lpm_stats_cluster_enter(cluster->stats, idx);
998
999 if (from_idle && lpm_prediction)
1000 update_cluster_history_time(&cluster->history, idx,
1001 ktime_to_us(ktime_get()));
1002 }
1003
	if (level->notify_rpm) {
		uint64_t us;
		uint32_t pred_us;

		us = get_cluster_sleep_time(cluster, NULL, from_idle,
						&pred_us);
		us = us + 1;
		clear_predict_history();
		clear_cl_predict_history();
1013
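		/*
		 * Convert the expected sleep length from microseconds to
		 * 32.768 kHz sleep-clock ticks before handing it to the
		 * system PM driver. Illustrative arithmetic: 1,000,000 us
		 * divided by USEC_PER_SEC/SCLK_HZ (which truncates to 30)
		 * gives roughly 33333 ticks.
		 */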
		do_div(us, USEC_PER_SEC/SCLK_HZ);
		system_sleep_enter(us);
	}
	/* Notify the cluster enter event after configuration completes successfully */
	cluster_notify(cluster, level, true);

	cluster->last_level = idx;
1021
1022 if (predicted && (idx < (cluster->nlevels - 1))) {
1023 struct power_params *pwr_params = &cluster->levels[idx].pwr;
1024
1025 tick_broadcast_exit();
1026 clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
1027 tick_broadcast_enter();
1028 }
1029
1030 return 0;
}
1032
1033static void cluster_prepare(struct lpm_cluster *cluster,
1034 const struct cpumask *cpu, int child_idx, bool from_idle,
1035 int64_t start_time)
1036{
1037 int i;
1038 int predicted = 0;
1039
1040 if (!cluster)
1041 return;
1042
1043 if (cluster->min_child_level > child_idx)
1044 return;
1045
1046 spin_lock(&cluster->sync_lock);
1047 cpumask_or(&cluster->num_children_in_sync, cpu,
1048 &cluster->num_children_in_sync);
1049
1050 for (i = 0; i < cluster->nlevels; i++) {
1051 struct lpm_cluster_level *lvl = &cluster->levels[i];
1052
1053 if (child_idx >= lvl->min_child_level)
1054 cpumask_or(&lvl->num_cpu_votes, cpu,
1055 &lvl->num_cpu_votes);
1056 }
1057
	/*
	 * cluster_select() does not make any configuration changes, so it
	 * is OK to release the lock here. If a core wakes up for a rude
	 * request, it need not wait for another core to finish its cluster
	 * selection and configuration process.
	 */
1064
1065 if (!cpumask_equal(&cluster->num_children_in_sync,
1066 &cluster->child_cpus))
1067 goto failed;
1068
1069 i = cluster_select(cluster, from_idle, &predicted);
1070
1071 if (((i < 0) || (i == cluster->default_level))
1072 && predicted && from_idle) {
1073 update_cluster_history_time(&cluster->history,
1074 -1, ktime_to_us(ktime_get()));
1075
1076 if (i < 0) {
1077 struct power_params *pwr_params =
1078 &cluster->levels[0].pwr;
1079
1080 tick_broadcast_exit();
1081 clusttimer_start(cluster,
1082 pwr_params->max_residency + tmr_add);
1083 tick_broadcast_enter();
1084 }
1085 }
1086
1087 if (i < 0)
1088 goto failed;
1089
1090 if (cluster_configure(cluster, i, from_idle, predicted))
1091 goto failed;
1092
1093 cluster->stats->sleep_time = start_time;
1094 cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
1095 from_idle, start_time);
1096
1097 spin_unlock(&cluster->sync_lock);
1098 return;
1099failed:
1100 spin_unlock(&cluster->sync_lock);
1101 cluster->stats->sleep_time = 0;
1102}
1103
1104static void cluster_unprepare(struct lpm_cluster *cluster,
1105 const struct cpumask *cpu, int child_idx, bool from_idle,
1106 int64_t end_time)
1107{
1108 struct lpm_cluster_level *level;
1109 bool first_cpu;
	int last_level, i;

	if (!cluster)
1113 return;
1114
1115 if (cluster->min_child_level > child_idx)
1116 return;
1117
1118 spin_lock(&cluster->sync_lock);
1119 last_level = cluster->default_level;
1120 first_cpu = cpumask_equal(&cluster->num_children_in_sync,
1121 &cluster->child_cpus);
1122 cpumask_andnot(&cluster->num_children_in_sync,
1123 &cluster->num_children_in_sync, cpu);
1124
1125 for (i = 0; i < cluster->nlevels; i++) {
1126 struct lpm_cluster_level *lvl = &cluster->levels[i];
1127
1128 if (child_idx >= lvl->min_child_level)
1129 cpumask_andnot(&lvl->num_cpu_votes,
1130 &lvl->num_cpu_votes, cpu);
1131 }
1132
1133 if (from_idle && first_cpu &&
1134 (cluster->last_level == cluster->default_level))
1135 update_cluster_history(&cluster->history, cluster->last_level);
1136
1137 if (!first_cpu || cluster->last_level == cluster->default_level)
1138 goto unlock_return;
1139
1140 if (cluster->stats->sleep_time)
1141 cluster->stats->sleep_time = end_time -
1142 cluster->stats->sleep_time;
1143 lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
1144
	level = &cluster->levels[cluster->last_level];

	if (level->notify_rpm)
		system_sleep_exit();

	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
1151 cluster->num_children_in_sync.bits[0],
1152 cluster->child_cpus.bits[0], from_idle);
1153 trace_cluster_exit(cluster->cluster_name, cluster->last_level,
1154 cluster->num_children_in_sync.bits[0],
1155 cluster->child_cpus.bits[0], from_idle);
1156
1157 last_level = cluster->last_level;
1158 cluster->last_level = cluster->default_level;
1159
	for (i = 0; i < cluster->ndevices; i++)
		level = &cluster->levels[cluster->default_level];

	cluster_notify(cluster, &cluster->levels[last_level], false);
1164
1165 if (from_idle)
1166 update_cluster_history(&cluster->history, last_level);
1167
1168 cluster_unprepare(cluster->parent, &cluster->child_cpus,
1169 last_level, from_idle, end_time);
1170unlock_return:
1171 spin_unlock(&cluster->sync_lock);
1172}
1173
static inline void cpu_prepare(struct lpm_cpu *cpu, int cpu_index,
				bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
	bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;

	/*
	 * Use the broadcast timer for aggregating sleep modes within a
	 * cluster. A broadcast timer could be used in the following
	 * scenarios:
	 * 1) The architected timer HW gets reset during certain low power
	 * modes and the core relies on an external (broadcast) timer to
	 * wake up from sleep. This information is passed through the
	 * device tree.
	 * 2) The CPU low power mode could trigger a system low power mode.
	 * The low power module relies on the broadcast timer to aggregate
	 * the next wakeup within a cluster, in which case the CPU switches
	 * over to using the broadcast timer.
	 */
	if (from_idle && cpu_level->use_bc_timer)
		tick_broadcast_enter();
1192
1193 if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
1194 || (cpu_level->mode ==
1195 MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
1196 || (cpu_level->is_reset)))
1197 cpu_pm_enter();
1198
1199 /*
1200 * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
1201 */
1202 if (jtag_save_restore)
1203 msm_jtag_save_state();
1204}
1205
static inline void cpu_unprepare(struct lpm_cpu *cpu, int cpu_index,
				bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
	bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;

	if (from_idle && cpu_level->use_bc_timer)
		tick_broadcast_exit();
1214
1215 if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
1216 || (cpu_level->mode ==
1217 MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
1218 || cpu_level->is_reset))
1219 cpu_pm_exit();
1220
1221 /*
1222 * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
1223 */
1224 if (jtag_save_restore)
1225 msm_jtag_restore_state();
1226}
1227
1228int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
1229{
1230 int state_id = 0;
1231
1232 if (!cluster)
1233 return 0;
1234
1235 spin_lock(&cluster->sync_lock);
1236
1237 if (!cpumask_equal(&cluster->num_children_in_sync,
1238 &cluster->child_cpus))
1239 goto unlock_and_return;
1240
1241 state_id |= get_cluster_id(cluster->parent, aff_lvl);
1242
1243 if (cluster->last_level != cluster->default_level) {
1244 struct lpm_cluster_level *level
1245 = &cluster->levels[cluster->last_level];
1246
1247 state_id |= (level->psci_id & cluster->psci_mode_mask)
1248 << cluster->psci_mode_shift;
1249 (*aff_lvl)++;
1250 }
1251unlock_and_return:
1252 spin_unlock(&cluster->sync_lock);
1253 return state_id;
1254}
1255
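/*
 * Sketch of how the composed state_id is used: get_cluster_id() walks up
 * the hierarchy OR-ing each synced cluster's psci_id (shifted by its
 * psci_mode_shift) into state_id, psci_enter_sleep() then adds the CPU
 * level's psci_id, the reset flag and the affinity level, and the result
 * is handed to arm_cpuidle_suspend() for the PSCI CPU_SUSPEND call.
 */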
static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
{
	int affinity_level = 0;
	int state_id = get_cluster_id(cpu->parent, &affinity_level);
	int power_state =
		PSCI_POWER_STATE(cpu->levels[idx].is_reset);
	bool success = false;
1263 /*
1264 * idx = 0 is the default LPM state
1265 */
1266 if (!idx) {
1267 stop_critical_timings();
1268 wfi();
1269 start_critical_timings();
1270 return 1;
1271 }
1272
	affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
	state_id |= (power_state | affinity_level
			| cpu->levels[idx].psci_id);

	update_debug_pc_event(CPU_ENTER, state_id,
1278 0xdeaffeed, 0xdeaffeed, true);
1279 stop_critical_timings();
1280 success = !arm_cpuidle_suspend(state_id);
1281 start_critical_timings();
1282 update_debug_pc_event(CPU_EXIT, state_id,
1283 success, 0xdeaffeed, true);
1284 return success;
}

static int lpm_cpuidle_select(struct cpuidle_driver *drv,
		struct cpuidle_device *dev)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
	int idx;

	if (!cpu)
		return 0;

	idx = cpu_power_select(dev, cpu);

	if (idx < 0)
		return 0;

	return idx;
}
1303
1304static void update_history(struct cpuidle_device *dev, int idx)
1305{
1306 struct lpm_history *history = &per_cpu(hist, dev->cpu);
1307 uint32_t tmr = 0;
1308
1309 if (!lpm_prediction)
1310 return;
1311
1312 if (history->htmr_wkup) {
1313 if (!history->hptr)
1314 history->hptr = MAXSAMPLES-1;
1315 else
1316 history->hptr--;
1317
1318 history->resi[history->hptr] += dev->last_residency;
1319 history->htmr_wkup = 0;
1320 tmr = 1;
1321 } else
1322 history->resi[history->hptr] = dev->last_residency;
1323
1324 history->mode[history->hptr] = idx;
1325
1326 trace_cpu_pred_hist(history->mode[history->hptr],
1327 history->resi[history->hptr], history->hptr, tmr);
1328
1329 if (history->nsamp < MAXSAMPLES)
1330 history->nsamp++;
1331
1332 (history->hptr)++;
1333 if (history->hptr >= MAXSAMPLES)
1334 history->hptr = 0;
1335}
1336
1337static int lpm_cpuidle_enter(struct cpuidle_device *dev,
1338 struct cpuidle_driver *drv, int idx)
1339{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
	bool success = true;
	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
	int64_t start_time = ktime_to_ns(ktime_get()), end_time;
	struct power_params *pwr_params;

	pwr_params = &cpu->levels[idx].pwr;

	cpu_prepare(cpu, idx, true);
	cluster_prepare(cpu->parent, cpumask, idx, true, start_time);

	trace_cpu_idle_enter(idx);
1354 lpm_stats_cpu_enter(idx, start_time);
1355
1356 if (need_resched() || (idx < 0))
1357 goto exit;
1358
	success = psci_enter_sleep(cpu, idx, true);

exit:
1362 end_time = ktime_to_ns(ktime_get());
1363 lpm_stats_cpu_exit(idx, end_time, success);
1364
	cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
	cpu_unprepare(cpu, idx, true);
	sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
1368 end_time = ktime_to_ns(ktime_get()) - start_time;
1369 do_div(end_time, 1000);
1370 dev->last_residency = end_time;
1371 update_history(dev, idx);
1372 trace_cpu_idle_exit(idx, success);
1373 local_irq_enable();
1374 if (lpm_prediction) {
1375 histtimer_cancel();
1376 clusttimer_cancel();
1377 }
1378 return idx;
1379}
1380
1381#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
1382static int cpuidle_register_cpu(struct cpuidle_driver *drv,
1383 struct cpumask *mask)
1384{
1385 struct cpuidle_device *device;
1386 int cpu, ret;
1387
1388
1389 if (!mask || !drv)
1390 return -EINVAL;
1391
1392 drv->cpumask = mask;
1393 ret = cpuidle_register_driver(drv);
1394 if (ret) {
1395 pr_err("Failed to register cpuidle driver %d\n", ret);
1396 goto failed_driver_register;
1397 }
1398
1399 for_each_cpu(cpu, mask) {
1400 device = &per_cpu(cpuidle_dev, cpu);
1401 device->cpu = cpu;
1402
1403 ret = cpuidle_register_device(device);
1404 if (ret) {
1405 pr_err("Failed to register cpuidle driver for cpu:%u\n",
1406 cpu);
1407 goto failed_driver_register;
1408 }
1409 }
1410 return ret;
1411failed_driver_register:
1412 for_each_cpu(cpu, mask)
1413 cpuidle_unregister_driver(drv);
1414 return ret;
1415}
1416#else
1417static int cpuidle_register_cpu(struct cpuidle_driver *drv,
1418 struct cpumask *mask)
1419{
1420 return cpuidle_register(drv, NULL);
1421}
1422#endif
1423
1424static struct cpuidle_governor lpm_governor = {
1425 .name = "qcom",
1426 .rating = 30,
1427 .select = lpm_cpuidle_select,
1428 .owner = THIS_MODULE,
1429};
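/*
 * Note: this governor only implements a .select() callback; actual state
 * entry goes through lpm_cpuidle_enter(), which is installed as the
 * .enter() hook of every cpuidle state in cluster_cpuidle_register().
 */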
1430
1431static int cluster_cpuidle_register(struct lpm_cluster *cl)
1432{
1433 int i = 0, ret = 0;
1434 unsigned int cpu;
1435 struct lpm_cluster *p = NULL;
	struct lpm_cpu *lpm_cpu;

	if (list_empty(&cl->cpu)) {
		struct lpm_cluster *n;
1440
1441 list_for_each_entry(n, &cl->child, list) {
1442 ret = cluster_cpuidle_register(n);
1443 if (ret)
1444 break;
1445 }
1446 return ret;
1447 }
1448
	list_for_each_entry(lpm_cpu, &cl->cpu, list) {
		lpm_cpu->drv = kcalloc(1, sizeof(*lpm_cpu->drv), GFP_KERNEL);
		if (!lpm_cpu->drv)
			return -ENOMEM;

		lpm_cpu->drv->name = "msm_idle";

		for (i = 0; i < lpm_cpu->nlevels; i++) {
1457 struct cpuidle_state *st = &lpm_cpu->drv->states[i];
			struct lpm_cpu_level *cpu_level = &lpm_cpu->levels[i];

			snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
1461 snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
1462 st->flags = 0;
1463 st->exit_latency = cpu_level->pwr.latency_us;
1464 st->power_usage = cpu_level->pwr.ss_power;
1465 st->target_residency = 0;
1466 st->enter = lpm_cpuidle_enter;
		}

		lpm_cpu->drv->state_count = lpm_cpu->nlevels;
1470 lpm_cpu->drv->safe_state_index = 0;
1471 for_each_cpu(cpu, &lpm_cpu->related_cpus)
1472 per_cpu(cpu_lpm, cpu) = lpm_cpu;
1473
1474 for_each_possible_cpu(cpu) {
1475 if (cpu_online(cpu))
1476 continue;
1477 if (per_cpu(cpu_lpm, cpu))
1478 p = per_cpu(cpu_lpm, cpu)->parent;
1479 while (p) {
1480 int j;
1481
1482 spin_lock(&p->sync_lock);
1483 cpumask_set_cpu(cpu, &p->num_children_in_sync);
1484 for (j = 0; j < p->nlevels; j++)
1485 cpumask_copy(
1486 &p->levels[j].num_cpu_votes,
1487 &p->num_children_in_sync);
1488 spin_unlock(&p->sync_lock);
1489 p = p->parent;
1490 }
1491 }
1492 ret = cpuidle_register_cpu(lpm_cpu->drv,
1493 &lpm_cpu->related_cpus);
1494
1495 if (ret) {
1496 kfree(lpm_cpu->drv);
1497 return -ENOMEM;
1498 }
	}
1500 return 0;
1501}
1502
1503/**
1504 * init_lpm - initializes the governor
1505 */
1506static int __init init_lpm(void)
1507{
1508 return cpuidle_register_governor(&lpm_governor);
1509}
1510
1511postcore_initcall(init_lpm);
1512
1513static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
1514 struct lpm_cluster *parent)
1515{
1516 const char **level_name;
1517 int i;
1518
1519 level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);
1520
1521 if (!level_name)
1522 return;
1523
1524 for (i = 0; i < cpu->nlevels; i++)
1525 level_name[i] = cpu->levels[i].name;
1526
1527 lpm_stats_config_level("cpu", level_name, cpu->nlevels,
				parent->stats, &cpu->related_cpus);

	kfree(level_name);
1531}
1532
1533static void register_cluster_lpm_stats(struct lpm_cluster *cl,
1534 struct lpm_cluster *parent)
1535{
1536 const char **level_name;
	struct lpm_cluster *child;
	struct lpm_cpu *cpu;
	int i;

	if (!cl)
1542 return;
1543
1544 level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);
1545
1546 if (!level_name)
1547 return;
1548
1549 for (i = 0; i < cl->nlevels; i++)
1550 level_name[i] = cl->levels[i].level_name;
1551
1552 cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
1553 cl->nlevels, parent ? parent->stats : NULL, NULL);
1554
1555 kfree(level_name);
1556
	list_for_each_entry(cpu, &cl->cpu, list) {
		pr_err("%s()\n", __func__);
		register_cpu_lpm_stats(cpu, cl);
	}
	if (!list_empty(&cl->cpu))
		return;

	list_for_each_entry(child, &cl->child, list)
1565 register_cluster_lpm_stats(child, cl);
1566}
1567
1568static int lpm_suspend_prepare(void)
1569{
1570 suspend_in_progress = true;
	lpm_stats_suspend_enter();
1572
1573 return 0;
1574}
1575
1576static void lpm_suspend_wake(void)
1577{
1578 suspend_in_progress = false;
	lpm_stats_suspend_exit();
1580}
1581
1582static int lpm_suspend_enter(suspend_state_t state)
1583{
1584 int cpu = raw_smp_processor_id();
	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);
	struct lpm_cluster *cluster = lpm_cpu->parent;
	const struct cpumask *cpumask = get_cpu_mask(cpu);
1588 int idx;
1589
1590 for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
1591
1592 if (lpm_cpu_mode_allow(cpu, idx, false))
1593 break;
1594 }
1595 if (idx < 0) {
1596 pr_err("Failed suspend\n");
1597 return 0;
1598 }
	cpu_prepare(lpm_cpu, idx, false);
	cluster_prepare(cluster, cpumask, idx, false, 0);
1601 if (idx > 0)
1602 update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
1603 0xdeaffeed, false);
1604
	/*
	 * Print the clocks that are enabled during system suspend.
	 * This debug information is useful for finding out which clocks
	 * are enabled and preventing the system level LPMs (XO and Vmin).
	 */

	psci_enter_sleep(lpm_cpu, idx, true);

	if (idx > 0)
1615 update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
1616 false);
1617
1618 cluster_unprepare(cluster, cpumask, idx, false, 0);
	cpu_unprepare(lpm_cpu, idx, false);
	return 0;
1621}
1622
1623static const struct platform_suspend_ops lpm_suspend_ops = {
1624 .enter = lpm_suspend_enter,
1625 .valid = suspend_valid_only_mem,
1626 .prepare_late = lpm_suspend_prepare,
1627 .wake = lpm_suspend_wake,
1628};
1629
1630static int lpm_probe(struct platform_device *pdev)
1631{
1632 int ret;
1633 int size;
1634 struct kobject *module_kobj = NULL;
1635
1636 get_online_cpus();
1637 lpm_root_node = lpm_of_parse_cluster(pdev);
1638
1639 if (IS_ERR_OR_NULL(lpm_root_node)) {
1640 pr_err("%s(): Failed to probe low power modes\n", __func__);
1641 put_online_cpus();
1642 return PTR_ERR(lpm_root_node);
1643 }
1644
1645 if (print_parsed_dt)
1646 cluster_dt_walkthrough(lpm_root_node);
1647
	/*
	 * Register the hotplug notifier before the broadcast timer setup
	 * to prevent a race where a broadcast timer might not be set up
	 * for a core. This is a bug in the existing code, but there are
	 * no known issues, probably because of how late lpm_levels gets
	 * initialized.
	 */
1654 suspend_set_ops(&lpm_suspend_ops);
1655 hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1656 hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1657 cluster_timer_init(lpm_root_node);
1658
	size = num_dbg_elements * sizeof(struct lpm_debug);
1660 lpm_debug = dma_alloc_coherent(&pdev->dev, size,
1661 &lpm_debug_phys, GFP_KERNEL);
1662 register_cluster_lpm_stats(lpm_root_node, NULL);
1663
1664 ret = cluster_cpuidle_register(lpm_root_node);
1665 put_online_cpus();
1666 if (ret) {
		pr_err("%s(): Failed to register with cpuidle framework\n",
1668 __func__);
1669 goto failed;
1670 }
	ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
1672 "AP_QCOM_SLEEP_STARTING",
1673 lpm_starting_cpu, lpm_dying_cpu);
1674 if (ret)
1675 goto failed;
1676
	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
1678 if (!module_kobj) {
1679 pr_err("%s: cannot find kobject for module %s\n",
1680 __func__, KBUILD_MODNAME);
1681 ret = -ENOENT;
1682 goto failed;
1683 }
1684
1685 ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
1686 if (ret) {
1687 pr_err("%s(): Failed to create cluster level nodes\n",
1688 __func__);
1689 goto failed;
1690 }
1691
1692 return 0;
1693failed:
1694 free_cluster_node(lpm_root_node);
1695 lpm_root_node = NULL;
1696 return ret;
1697}
1698
1699static const struct of_device_id lpm_mtch_tbl[] = {
1700 {.compatible = "qcom,lpm-levels"},
1701 {},
1702};
1703
1704static struct platform_driver lpm_driver = {
1705 .probe = lpm_probe,
1706 .driver = {
1707 .name = "lpm-levels",
1708 .owner = THIS_MODULE,
1709 .of_match_table = lpm_mtch_tbl,
1710 },
1711};
1712
1713static int __init lpm_levels_module_init(void)
1714{
1715 int rc;
1716
1717 rc = platform_driver_register(&lpm_driver);
1718 if (rc) {
1719 pr_info("Error registering %s\n", lpm_driver.driver.name);
1720 goto fail;
1721 }
1722
1723fail:
1724 return rc;
1725}
1726late_initcall(lpm_levels_module_init);