/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/tick.h>
#include <linux/suspend.h>
#include <linux/pm_qos.h>
#include <linux/of_platform.h>
#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/cpu_pm.h>
#include <linux/cpuhotplug.h>
#include <soc/qcom/pm.h>
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm_levels.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/minidump.h>
#include <asm/arch_timer.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
#include "lpm-levels.h"
#include <trace/events/power.h>
#if defined(CONFIG_COMMON_CLK)
#include "../clk/clk.h"
#elif defined(CONFIG_COMMON_CLK_MSM)
#include "../../drivers/clk/msm/clock.h"
#endif /* CONFIG_COMMON_CLK */
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_low_power.h>

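/*
 * PSCI_POWER_STATE() and PSCI_AFFINITY_LEVEL() below follow the PSCI
 * power_state parameter layout: bit 30 is the StateType (powerdown)
 * bit and bits 25:24 carry the affinity level. The low-order state
 * bits are filled in from DT-provided psci_id values when the final
 * state_id is assembled in psci_enter_sleep().
 */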
#define SCLK_HZ (32768)
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
#define BIAS_HYST (bias_hyst * NSEC_PER_MSEC)

enum {
	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};

enum debug_event {
	CPU_ENTER,
	CPU_EXIT,
	CLUSTER_ENTER,
	CLUSTER_EXIT,
	CPU_HP_STARTING,
	CPU_HP_DYING,
};

struct lpm_debug {
	cycle_t time;
	enum debug_event evt;
	int cpu;
	uint32_t arg1;
	uint32_t arg2;
	uint32_t arg3;
	uint32_t arg4;
};

static struct system_pm_ops *sys_pm_ops;

struct lpm_cluster *lpm_root_node;

#define MAXSAMPLES 5

static bool lpm_prediction = true;
module_param_named(lpm_prediction, lpm_prediction, bool, 0664);

static uint32_t ref_stddev = 500;
module_param_named(ref_stddev, ref_stddev, uint, 0664);

static uint32_t tmr_add = 1000;
module_param_named(tmr_add, tmr_add, uint, 0664);

static uint32_t ref_premature_cnt = 1;
static uint32_t bias_hyst;
module_param_named(bias_hyst, bias_hyst, uint, 0664);

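/**
 * struct lpm_history - per-CPU ring buffer of recent idle residencies
 * @resi:	last MAXSAMPLES observed residencies, in microseconds
 * @mode:	low power mode index chosen for each sample
 * @nsamp:	number of valid samples collected so far
 * @hptr:	write index into the ring buffer
 * @hinvalid:	set when the history timer fired, invalidating samples
 * @htmr_wkup:	set when the last wakeup was caused by the history timer
 * @stime:	absolute time (us) at which the predicted wakeup is expected
 */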
struct lpm_history {
	uint32_t resi[MAXSAMPLES];
	int mode[MAXSAMPLES];
	int nsamp;
	uint32_t hptr;
	uint32_t hinvalid;
	uint32_t htmr_wkup;
	int64_t stime;
};

static DEFINE_PER_CPU(struct lpm_history, hist);

static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static struct hrtimer histtimer;
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;

static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time);
static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time);

static bool print_parsed_dt;
module_param_named(print_parsed_dt, print_parsed_dt, bool, 0664);

static bool sleep_disabled;
module_param_named(sleep_disabled, sleep_disabled, bool, 0664);

/**
 * msm_cpuidle_get_deep_idle_latency - Get deep idle latency value
 *
 * Returns an s32 latency value
 */
s32 msm_cpuidle_get_deep_idle_latency(void)
{
	return 10;
}
EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);

uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
{
	if (sys_pm_ops)
		return -EUSERS;

	sys_pm_ops = pm_ops;

	return 0;
}

static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
		struct latency_level *lat_level)
{
	struct list_head *list;
	struct lpm_cluster_level *level;
	struct lpm_cluster *n;
	struct power_params *pwr_params;
	uint32_t latency = 0;
	int i;

	if (!cluster->list.next) {
		for (i = 0; i < cluster->nlevels; i++) {
			level = &cluster->levels[i];
			pwr_params = &level->pwr;
			if (lat_level->reset_level == level->reset_level) {
				if ((latency > pwr_params->latency_us)
						|| (!latency))
					latency = pwr_params->latency_us;
				break;
			}
		}
	} else {
		list_for_each(list, &cluster->parent->child) {
			n = list_entry(list, typeof(*n), list);
			if (lat_level->level_name) {
				if (strcmp(lat_level->level_name,
						n->cluster_name))
					continue;
			}
			for (i = 0; i < n->nlevels; i++) {
				level = &n->levels[i];
				pwr_params = &level->pwr;
				if (lat_level->reset_level ==
						level->reset_level) {
					if ((latency > pwr_params->latency_us)
							|| (!latency))
						latency =
						pwr_params->latency_us;
					break;
				}
			}
		}
	}
	return latency;
}

static uint32_t least_cpu_latency(struct list_head *child,
		struct latency_level *lat_level)
{
	struct list_head *list;
	struct lpm_cpu_level *level;
	struct power_params *pwr_params;
	struct lpm_cpu *cpu;
	struct lpm_cluster *n;
	uint32_t lat = 0;
	int i;

	list_for_each(list, child) {
		n = list_entry(list, typeof(*n), list);
		if (lat_level->level_name) {
			if (strcmp(lat_level->level_name, n->cluster_name))
				continue;
		}
		list_for_each_entry(cpu, &n->cpu, list) {
			for (i = 0; i < cpu->nlevels; i++) {
				level = &cpu->levels[i];
				pwr_params = &level->pwr;
				if (lat_level->reset_level
						== level->reset_level) {
					if ((lat > pwr_params->latency_us)
							|| (!lat))
						lat = pwr_params->latency_us;
					break;
				}
			}
		}
	}
	return lat;
}

static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
		int affinity_level)
{
	struct lpm_cluster *n;

	if ((cluster->aff_level == affinity_level)
		|| ((!list_empty(&cluster->cpu)) && (affinity_level == 0)))
		return cluster;
	else if (list_empty(&cluster->cpu)) {
		n = list_entry(cluster->child.next, typeof(*n), list);
		return cluster_aff_match(n, affinity_level);
	} else
		return NULL;
}

int lpm_get_latency(struct latency_level *level, uint32_t *latency)
{
	struct lpm_cluster *cluster;
	uint32_t val;

	if (!lpm_root_node) {
		pr_err("lpm_probe not completed\n");
		return -EAGAIN;
	}

	if ((level->affinity_level < 0)
		|| (level->affinity_level > lpm_root_node->aff_level)
		|| (level->reset_level < LPM_RESET_LVL_RET)
		|| (level->reset_level > LPM_RESET_LVL_PC)
		|| !latency)
		return -EINVAL;

	cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
	if (!cluster) {
		pr_err("No matching cluster found for affinity_level:%d\n",
			level->affinity_level);
		return -EINVAL;
	}

	if (level->affinity_level == 0)
		val = least_cpu_latency(&cluster->parent->child, level);
	else
		val = least_cluster_latency(cluster, level);

	if (!val) {
		pr_err("No mode with affinity_level:%d reset_level:%d\n",
			level->affinity_level, level->reset_level);
		return -EINVAL;
	}

	*latency = val;

	return 0;
}
EXPORT_SYMBOL(lpm_get_latency);

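/*
 * Record a low power mode event in the shared lpm_debug ring buffer.
 * The buffer is allocated as DMA-coherent memory in lpm_probe() and is
 * also exported to Minidump, so entries survive a crash and can be
 * inspected post-mortem.
 */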
static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
		uint32_t arg2, uint32_t arg3, uint32_t arg4)
{
	struct lpm_debug *dbg;
	int idx;
	static DEFINE_SPINLOCK(debug_lock);
	static int pc_event_index;

	if (!lpm_debug)
		return;

	spin_lock(&debug_lock);
	idx = pc_event_index++;
	dbg = &lpm_debug[idx & (num_dbg_elements - 1)];

	dbg->evt = event;
	dbg->time = arch_counter_get_cntvct();
	dbg->cpu = raw_smp_processor_id();
	dbg->arg1 = arg1;
	dbg->arg2 = arg2;
	dbg->arg3 = arg3;
	dbg->arg4 = arg4;
	spin_unlock(&debug_lock);
}

static int lpm_dying_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	update_debug_pc_event(CPU_HP_DYING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}

static int lpm_starting_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	update_debug_pc_event(CPU_HP_STARTING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}

static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
{
	return HRTIMER_NORESTART;
}

static void histtimer_cancel(void)
{
	hrtimer_try_to_cancel(&histtimer);
}

static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
{
	int cpu = raw_smp_processor_id();
	struct lpm_history *history = &per_cpu(hist, cpu);

	history->hinvalid = 1;
	return HRTIMER_NORESTART;
}

static void histtimer_start(uint32_t time_us)
{
	uint64_t time_ns = time_us * NSEC_PER_USEC;
	ktime_t hist_ktime = ns_to_ktime(time_ns);

	histtimer.function = histtimer_fn;
	hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
}

static void cluster_timer_init(struct lpm_cluster *cluster)
{
	struct list_head *list;

	if (!cluster)
		return;

	hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		n = list_entry(list, typeof(*n), list);
		cluster_timer_init(n);
	}
}

static void clusttimer_cancel(void)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	hrtimer_try_to_cancel(&cluster->histtimer);

	if (cluster->parent)
		hrtimer_try_to_cancel(&cluster->parent->histtimer);
}

static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
{
	struct lpm_cluster *cluster = container_of(h,
			struct lpm_cluster, histtimer);

	cluster->history.hinvalid = 1;
	return HRTIMER_NORESTART;
}

static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
{
	uint64_t time_ns = time_us * NSEC_PER_USEC;
	ktime_t clust_ktime = ns_to_ktime(time_ns);

	cluster->histtimer.function = clusttimer_fn;
	hrtimer_start(&cluster->histtimer, clust_ktime,
			HRTIMER_MODE_REL_PINNED);
}

static void msm_pm_set_timer(uint32_t modified_time_us)
{
	u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
	ktime_t modified_ktime = ns_to_ktime(modified_time_ns);

	lpm_hrtimer.function = lpm_hrtimer_cb;
	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
}

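/*
 * lpm_cpuidle_predict() guesses the upcoming idle residency from the
 * last MAXSAMPLES observed residencies. Roughly: if the samples cluster
 * tightly (standard deviation within ref_stddev, or the mean dominating
 * the deviation), predict their average; otherwise drop the largest
 * outlier and retry. Failing that, look for a mode with repeated
 * premature exits and restrict selection to shallower modes via
 * *idx_restrict. Illustrative example: samples {900, 950, 1000, 1050,
 * 1100} us give avg = 1000 and stddev ~70, so 1000 us is predicted.
 */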
static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
		struct lpm_cpu *cpu, int *idx_restrict,
		uint32_t *idx_restrict_time)
{
	int i, j, divisor;
	uint64_t max, avg, stddev;
	int64_t thresh = LLONG_MAX;
	struct lpm_history *history = &per_cpu(hist, dev->cpu);
	uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
	uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);

	if (!lpm_prediction || !cpu->lpm_prediction)
		return 0;

	/*
	 * Samples are marked invalid when the wakeup was caused by the
	 * history timer, so do not predict.
	 */
	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->stime = 0;
		return 0;
	}

	/*
	 * Predict only when all the samples are collected.
	 */
	if (history->nsamp < MAXSAMPLES) {
		history->stime = 0;
		return 0;
	}

	/*
	 * If the samples do not deviate much, use their average as the
	 * predicted sleep time. Otherwise, if any specific mode shows
	 * repeated premature exits, return the index of that mode.
	 */

again:
	max = avg = divisor = stddev = 0;
	for (i = 0; i < MAXSAMPLES; i++) {
		int64_t value = history->resi[i];

		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

	for (i = 0; i < MAXSAMPLES; i++) {
		int64_t value = history->resi[i];

		if (value <= thresh) {
			int64_t diff = value - avg;

			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);
	stddev = int_sqrt(stddev);

	/*
	 * If the deviation is small, return the average; otherwise
	 * ignore one maximum sample and retry.
	 */
	if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
			|| stddev <= ref_stddev) {
		history->stime = ktime_to_us(ktime_get()) + avg;
		return avg;
	} else if (divisor > (MAXSAMPLES - 1)) {
		thresh = max - 1;
		goto again;
	}

	/*
	 * Count the premature exits for each mode, excluding the
	 * clock-gating mode; if a mode has at least ref_premature_cnt
	 * premature exits, restrict that mode and all deeper modes.
	 */
	if (history->htmr_wkup != 1) {
		for (j = 1; j < cpu->nlevels; j++) {
			uint32_t failed = 0;
			uint64_t total = 0;

			for (i = 0; i < MAXSAMPLES; i++) {
				if ((history->mode[i] == j) &&
					(history->resi[i] < min_residency[j])) {
					failed++;
					total += history->resi[i];
				}
			}
			if (failed >= ref_premature_cnt) {
				*idx_restrict = j;
				do_div(total, failed);
				for (i = 0; i < j; i++) {
					if (total < max_residency[i]) {
						*idx_restrict = i+1;
						total = max_residency[i];
						break;
					}
				}

				*idx_restrict_time = total;
				history->stime = ktime_to_us(ktime_get())
						+ *idx_restrict_time;
				break;
			}
		}
	}
	return 0;
}

static inline void invalidate_predict_history(struct cpuidle_device *dev)
{
	struct lpm_history *history = &per_cpu(hist, dev->cpu);

	if (!lpm_prediction)
		return;

	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->stime = 0;
	}
}

static void clear_predict_history(void)
{
	struct lpm_history *history;
	int i;
	unsigned int cpu;

	if (!lpm_prediction)
		return;

	for_each_possible_cpu(cpu) {
		history = &per_cpu(hist, cpu);
		for (i = 0; i < MAXSAMPLES; i++) {
			history->resi[i] = 0;
			history->mode[i] = -1;
			history->hptr = 0;
			history->nsamp = 0;
			history->stime = 0;
		}
	}
}

static void update_history(struct cpuidle_device *dev, int idx);

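/*
 * A CPU is considered "biased" if it ran work within the last bias_hyst
 * milliseconds; a biased CPU is kept in the shallowest state so it can
 * resume work with minimal latency. With the default bias_hyst of 0,
 * BIAS_HYST evaluates to 0 and this check never biases a CPU.
 */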
static inline bool is_cpu_biased(int cpu)
{
	u64 now = sched_clock();
	u64 last = sched_get_cpu_last_busy_time(cpu);

	if (!last)
		return false;

	return (now - last) < BIAS_HYST;
}

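/*
 * Pick the deepest CPU low power mode that satisfies the PM QoS latency
 * bound and whose expected residency fits the time until the next
 * scheduled wakeup (or the predicted residency, when prediction is
 * active). Returns index 0 (clock gating) when sleep is disabled, the
 * expected sleep length is negative, or the CPU is biased.
 */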
static int cpu_power_select(struct cpuidle_device *dev,
		struct lpm_cpu *cpu)
{
	int best_level = 0;
	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
							dev->cpu);
	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
	uint32_t modified_time_us = 0;
	uint32_t next_event_us = 0;
	int i, idx_restrict;
	uint32_t lvl_latency_us = 0;
	uint64_t predicted = 0;
	uint32_t htime = 0, idx_restrict_time = 0;
	uint32_t next_wakeup_us = (uint32_t)sleep_us;
	uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
	uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);

	if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
		return best_level;

	idx_restrict = cpu->nlevels + 1;

	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));

	if (is_cpu_biased(dev->cpu) && (!cpu_isolated(dev->cpu)))
		goto done_select;

	for (i = 0; i < cpu->nlevels; i++) {
		struct lpm_cpu_level *level = &cpu->levels[i];
		struct power_params *pwr_params = &level->pwr;
		bool allow;

		allow = i ? lpm_cpu_mode_allow(dev->cpu, i, true) : true;

		if (!allow)
			continue;

		lvl_latency_us = pwr_params->latency_us;

		if (latency_us < lvl_latency_us)
			break;

		if (next_event_us) {
			if (next_event_us < lvl_latency_us)
				break;

			if (((next_event_us - lvl_latency_us) < sleep_us) ||
					(next_event_us < sleep_us))
				next_wakeup_us = next_event_us - lvl_latency_us;
		}

		if (!i && !cpu_isolated(dev->cpu)) {
			/*
			 * If next_wakeup_us itself is not long enough for
			 * any low power mode deeper than clock gating, do
			 * not call prediction.
			 */
			if (next_wakeup_us > max_residency[i]) {
				predicted = lpm_cpuidle_predict(dev, cpu,
					&idx_restrict, &idx_restrict_time);
				if (predicted && (predicted < min_residency[i]))
					predicted = min_residency[i];
			} else
				invalidate_predict_history(dev);
		}

		if (i >= idx_restrict)
			break;

		best_level = i;

		if (next_event_us && next_event_us < sleep_us && !i)
			modified_time_us = next_event_us - lvl_latency_us;
		else
			modified_time_us = 0;

		if (predicted ? (predicted <= max_residency[i])
			: (next_wakeup_us <= max_residency[i]))
			break;
	}

	if (modified_time_us)
		msm_pm_set_timer(modified_time_us);

	/*
	 * Start a timer to avoid staying in a shallower mode forever
	 * in case of misprediction.
	 */
	if ((predicted || (idx_restrict != (cpu->nlevels + 1)))
			&& ((best_level >= 0)
			&& (best_level < (cpu->nlevels-1)))) {
		htime = predicted + tmr_add;
		if (htime == tmr_add)
			htime = idx_restrict_time;
		else if (htime > max_residency[best_level])
			htime = max_residency[best_level];

		if ((next_wakeup_us > htime) &&
			((next_wakeup_us - htime) > max_residency[best_level]))
			histtimer_start(htime);
	}

done_select:
	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);

	trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
			predicted, htime);

	return best_level;
}
701
Raju P.L.S.S.S.N99d241e2017-12-07 00:09:01 +0530702static unsigned int get_next_online_cpu(bool from_idle)
703{
704 unsigned int cpu;
705 ktime_t next_event;
706 unsigned int next_cpu = raw_smp_processor_id();
707
708 if (!from_idle)
709 return next_cpu;
710 next_event.tv64 = KTIME_MAX;
711 for_each_online_cpu(cpu) {
712 ktime_t *next_event_c;
713
714 next_event_c = get_next_event_cpu(cpu);
715 if (next_event_c->tv64 < next_event.tv64) {
716 next_event.tv64 = next_event_c->tv64;
717 next_cpu = cpu;
718 }
719 }
720 return next_cpu;
721}
722
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700723static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
Raju P.L.S.S.S.N99d241e2017-12-07 00:09:01 +0530724 bool from_idle, uint32_t *pred_time)
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700725{
726 int cpu;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700727 ktime_t next_event;
728 struct cpumask online_cpus_in_cluster;
729 struct lpm_history *history;
730 int64_t prediction = LONG_MAX;
731
Raju P.L.S.S.S.N99d241e2017-12-07 00:09:01 +0530732 if (!from_idle)
733 return ~0ULL;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700734
Raju P.L.S.S.S.N99d241e2017-12-07 00:09:01 +0530735 next_event.tv64 = KTIME_MAX;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700736 cpumask_and(&online_cpus_in_cluster,
737 &cluster->num_children_in_sync, cpu_online_mask);
738
739 for_each_cpu(cpu, &online_cpus_in_cluster) {
740 ktime_t *next_event_c;
741
742 next_event_c = get_next_event_cpu(cpu);
743 if (next_event_c->tv64 < next_event.tv64) {
744 next_event.tv64 = next_event_c->tv64;
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700745 }
746
747 if (from_idle && lpm_prediction) {
748 history = &per_cpu(hist, cpu);
749 if (history->stime && (history->stime < prediction))
750 prediction = history->stime;
751 }
752 }
753
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -0700754 if (from_idle && lpm_prediction) {
755 if (prediction > ktime_to_us(ktime_get()))
756 *pred_time = prediction - ktime_to_us(ktime_get());
757 }
758
759 if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
760 return ktime_to_us(ktime_sub(next_event, ktime_get()));
761 else
762 return 0;
763}

static int cluster_predict(struct lpm_cluster *cluster,
		uint32_t *pred_us)
{
	int i, j;
	int ret = 0;
	struct cluster_history *history = &cluster->history;
	int64_t cur_time = ktime_to_us(ktime_get());

	if (!lpm_prediction)
		return 0;

	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->flag = 0;
		return ret;
	}

	if (history->nsamp == MAXSAMPLES) {
		for (i = 0; i < MAXSAMPLES; i++) {
			if ((cur_time - history->stime[i])
					> CLUST_SMPL_INVLD_TIME)
				history->nsamp--;
		}
	}

	if (history->nsamp < MAXSAMPLES) {
		history->flag = 0;
		return ret;
	}

	if (history->flag == 2)
		history->flag = 0;

	if (history->htmr_wkup != 1) {
		uint64_t total = 0;

		if (history->flag == 1) {
			for (i = 0; i < MAXSAMPLES; i++)
				total += history->resi[i];
			do_div(total, MAXSAMPLES);
			*pred_us = total;
			return 2;
		}

		for (j = 1; j < cluster->nlevels; j++) {
			uint32_t failed = 0;

			total = 0;
			for (i = 0; i < MAXSAMPLES; i++) {
				if ((history->mode[i] == j) && (history->resi[i]
				< cluster->levels[j].pwr.min_residency)) {
					failed++;
					total += history->resi[i];
				}
			}

			if (failed > (MAXSAMPLES-2)) {
				do_div(total, failed);
				*pred_us = total;
				history->flag = 1;
				return 1;
			}
		}
	}

	return ret;
}

static void update_cluster_history_time(struct cluster_history *history,
		int idx, uint64_t start)
{
	history->entry_idx = idx;
	history->entry_time = start;
}

static void update_cluster_history(struct cluster_history *history, int idx)
{
	uint32_t tmr = 0;
	uint32_t residency = 0;
	struct lpm_cluster *cluster =
			container_of(history, struct lpm_cluster, history);

	if (!lpm_prediction)
		return;

	if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
		residency = ktime_to_us(ktime_get()) - history->entry_time;
		history->stime[history->hptr] = history->entry_time;
	} else
		return;

	if (history->htmr_wkup) {
		if (!history->hptr)
			history->hptr = MAXSAMPLES-1;
		else
			history->hptr--;

		history->resi[history->hptr] += residency;

		history->htmr_wkup = 0;
		tmr = 1;
	} else
		history->resi[history->hptr] = residency;

	history->mode[history->hptr] = idx;

	history->entry_idx = INT_MIN;
	history->entry_time = 0;

	if (history->nsamp < MAXSAMPLES)
		history->nsamp++;

	trace_cluster_pred_hist(cluster->cluster_name,
		history->mode[history->hptr], history->resi[history->hptr],
		history->hptr, tmr);

	(history->hptr)++;

	if (history->hptr >= MAXSAMPLES)
		history->hptr = 0;
}

static void clear_cl_history_each(struct cluster_history *history)
{
	int i;

	for (i = 0; i < MAXSAMPLES; i++) {
		history->resi[i] = 0;
		history->mode[i] = -1;
		history->stime[i] = 0;
	}

	history->hptr = 0;
	history->nsamp = 0;
	history->flag = 0;
	history->hinvalid = 0;
	history->htmr_wkup = 0;
}

static void clear_cl_predict_history(void)
{
	struct lpm_cluster *cluster = lpm_root_node;
	struct list_head *list;

	if (!lpm_prediction)
		return;

	clear_cl_history_each(&cluster->history);

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		n = list_entry(list, typeof(*n), list);
		clear_cl_history_each(&n->history);
	}
}

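/*
 * Choose the deepest cluster low power mode for which: the mode is
 * allowed, every child CPU/cluster has voted for at least this depth,
 * the aggregate PM QoS latency bound is met, and the projected cluster
 * sleep time covers the mode's overhead. Modes that notify the RPM are
 * additionally gated by sys_pm_ops->sleep_allowed() and are skipped
 * from idle while a suspend is in progress.
 */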
static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
		int *ispred)
{
	int best_level = -1;
	int i;
	struct cpumask mask;
	uint32_t latency_us = ~0U;
	uint32_t sleep_us;
	uint32_t cpupred_us = 0, pred_us = 0;
	int pred_mode = 0, predicted = 0;

	if (!cluster)
		return -EINVAL;

	sleep_us = (uint32_t)get_cluster_sleep_time(cluster,
						from_idle, &cpupred_us);

	if (from_idle) {
		pred_mode = cluster_predict(cluster, &pred_us);

		if (cpupred_us && pred_mode && (cpupred_us < pred_us))
			pred_us = cpupred_us;

		if (pred_us && pred_mode && (pred_us < sleep_us))
			predicted = 1;

		if (predicted && (pred_us == cpupred_us))
			predicted = 2;
	}

	if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
		latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
							&mask);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *level = &cluster->levels[i];
		struct power_params *pwr_params = &level->pwr;

		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
			continue;

		if (!cpumask_equal(&cluster->num_children_in_sync,
				&level->num_cpu_votes))
			continue;

		if (from_idle && latency_us < pwr_params->latency_us)
			break;

		if (sleep_us < pwr_params->time_overhead_us)
			break;

		if (suspend_in_progress && from_idle && level->notify_rpm)
			continue;

		if (level->notify_rpm) {
			if (!(sys_pm_ops && sys_pm_ops->sleep_allowed))
				continue;
			if (!sys_pm_ops->sleep_allowed())
				continue;
		}

		best_level = i;

		if (from_idle &&
			(predicted ? (pred_us <= pwr_params->max_residency)
			: (sleep_us <= pwr_params->max_residency)))
			break;
	}

	if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
		cluster->history.flag = 2;

	*ispred = predicted;

	trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
				latency_us, predicted, pred_us);

	return best_level;
}

static void cluster_notify(struct lpm_cluster *cluster,
		struct lpm_cluster_level *level, bool enter)
{
	if (level->is_reset && enter)
		cpu_cluster_pm_enter(cluster->aff_level);
	else if (level->is_reset && !enter)
		cpu_cluster_pm_exit(cluster->aff_level);
}

static int cluster_configure(struct lpm_cluster *cluster, int idx,
		bool from_idle, int predicted)
{
	struct lpm_cluster_level *level = &cluster->levels[idx];
	struct cpumask online_cpus, cpumask;
	unsigned int cpu;

	cpumask_and(&online_cpus, &cluster->num_children_in_sync,
					cpu_online_mask);

	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
			|| is_IPI_pending(&online_cpus)) {
		return -EPERM;
	}

	if (idx != cluster->default_level) {
		update_debug_pc_event(CLUSTER_ENTER, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		trace_cluster_enter(cluster->cluster_name, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		lpm_stats_cluster_enter(cluster->stats, idx);

		if (from_idle && lpm_prediction)
			update_cluster_history_time(&cluster->history, idx,
						ktime_to_us(ktime_get()));
	}

	if (level->notify_rpm) {
		/*
		 * Print the clocks which are enabled during system suspend.
		 * This debug information is useful to know which clocks
		 * are enabled and preventing the system level LPMs
		 * (XO and Vmin).
		 */
		if (!from_idle)
			clock_debug_print_enabled(true);

		cpu = get_next_online_cpu(from_idle);
		cpumask_copy(&cpumask, cpumask_of(cpu));

		clear_predict_history();
		clear_cl_predict_history();
		if (sys_pm_ops && sys_pm_ops->enter)
			if ((sys_pm_ops->enter(&cpumask)))
				return -EBUSY;
	}
	/* Notify cluster enter event after successful config completion */
	cluster_notify(cluster, level, true);

	cluster->last_level = idx;

	if (predicted && (idx < (cluster->nlevels - 1))) {
		struct power_params *pwr_params = &cluster->levels[idx].pwr;

		clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
	}

	return 0;
}

static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t start_time)
{
	int i;
	int predicted = 0;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	cpumask_or(&cluster->num_children_in_sync, cpu,
			&cluster->num_children_in_sync);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_or(&lvl->num_cpu_votes, cpu,
					&lvl->num_cpu_votes);
	}

	/*
	 * cluster_select() does not make any configuration changes, so it
	 * is OK to release the lock here. If a core wakes up for a rude
	 * request, it need not wait for another to finish its cluster
	 * selection and configuration process.
	 */

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto failed;

	i = cluster_select(cluster, from_idle, &predicted);

	if (((i < 0) || (i == cluster->default_level))
			&& predicted && from_idle) {
		update_cluster_history_time(&cluster->history,
					-1, ktime_to_us(ktime_get()));

		if (i < 0) {
			struct power_params *pwr_params =
					&cluster->levels[0].pwr;

			clusttimer_start(cluster,
					pwr_params->max_residency + tmr_add);

			goto failed;
		}
	}

	if (i < 0)
		goto failed;

	if (cluster_configure(cluster, i, from_idle, predicted))
		goto failed;

	cluster->stats->sleep_time = start_time;
	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
			from_idle, start_time);

	spin_unlock(&cluster->sync_lock);
	return;
failed:
	spin_unlock(&cluster->sync_lock);
	cluster->stats->sleep_time = 0;
}

static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t end_time)
{
	struct lpm_cluster_level *level;
	bool first_cpu;
	int last_level, i;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	last_level = cluster->default_level;
	first_cpu = cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus);
	cpumask_andnot(&cluster->num_children_in_sync,
			&cluster->num_children_in_sync, cpu);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_andnot(&lvl->num_cpu_votes,
					&lvl->num_cpu_votes, cpu);
	}

	if (from_idle && first_cpu &&
		(cluster->last_level == cluster->default_level))
		update_cluster_history(&cluster->history, cluster->last_level);

	if (!first_cpu || cluster->last_level == cluster->default_level)
		goto unlock_return;

	if (cluster->stats->sleep_time)
		cluster->stats->sleep_time = end_time -
			cluster->stats->sleep_time;
	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);

	level = &cluster->levels[cluster->last_level];

	if (level->notify_rpm)
		if (sys_pm_ops && sys_pm_ops->exit)
			sys_pm_ops->exit();

	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);

	last_level = cluster->last_level;
	cluster->last_level = cluster->default_level;

	cluster_notify(cluster, &cluster->levels[last_level], false);

	if (from_idle)
		update_cluster_history(&cluster->history, last_level);

	cluster_unprepare(cluster->parent, &cluster->child_cpus,
			last_level, from_idle, end_time);
unlock_return:
	spin_unlock(&cluster->sync_lock);
}

static inline void cpu_prepare(struct lpm_cpu *cpu, int cpu_index,
		bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];

	/* Use the broadcast timer for aggregating sleep mode within a
	 * cluster. A broadcast timer could be used in the following
	 * scenarios:
	 * 1) The architected timer HW gets reset during certain low power
	 * modes and the core relies on an external (broadcast) timer to
	 * wake up from sleep. This information is passed through device
	 * tree.
	 * 2) The CPU low power mode could trigger a system low power mode.
	 * The low power module relies on the broadcast timer to aggregate
	 * the next wakeup within a cluster, in which case the CPU switches
	 * over to using the broadcast timer.
	 */

	if (from_idle && cpu_level->is_reset)
		cpu_pm_enter();
}

static inline void cpu_unprepare(struct lpm_cpu *cpu, int cpu_index,
		bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];

	if (from_idle && cpu_level->is_reset)
		cpu_pm_exit();
}

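/*
 * Walk up the cluster hierarchy, OR-ing each synced cluster's chosen
 * level psci_id (shifted into its DT-defined mode field) into the
 * composite PSCI state_id, while counting the affinity levels that are
 * actually entering a low power mode.
 */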
static int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl,
		bool from_idle)
{
	int state_id = 0;

	if (!cluster)
		return 0;

	spin_lock(&cluster->sync_lock);

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto unlock_and_return;

	state_id |= get_cluster_id(cluster->parent, aff_lvl, from_idle);

	if (cluster->last_level != cluster->default_level) {
		struct lpm_cluster_level *level
			= &cluster->levels[cluster->last_level];

		state_id |= (level->psci_id & cluster->psci_mode_mask)
				<< cluster->psci_mode_shift;

		/*
		 * We may have updated the broadcast timers, update
		 * the wakeup value by reading the bc timer directly.
		 */
		if (level->notify_rpm)
			if (sys_pm_ops && sys_pm_ops->update_wakeup)
				sys_pm_ops->update_wakeup(from_idle);
		if (level->psci_id)
			(*aff_lvl)++;
	}
unlock_and_return:
	spin_unlock(&cluster->sync_lock);
	return state_id;
}

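/*
 * Compose the PSCI suspend parameter and hand off to the ARM cpuidle
 * backend. As an illustration (the actual psci_id, mask and shift
 * values come from the device tree, so these numbers are hypothetical):
 * a reset CPU level with psci_id 0x3 inside a cluster level with
 * psci_id 0x4 at psci_mode_shift 4, one affinity level deep, yields
 *
 *   state_id = PSCI_POWER_STATE(1)      bit 30
 *            | PSCI_AFFINITY_LEVEL(1)   0x1 << 24
 *            | (0x4 << 4) | 0x3         cluster and CPU mode bits
 *            = 0x41000043
 */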
static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
{
	int affinity_level = 0, state_id = 0, power_state = 0;
	bool success = false;
	/*
	 * idx = 0 is the default LPM state
	 */

	if (!idx) {
		stop_critical_timings();
		wfi();
		start_critical_timings();
		return 1;
	}

	if (from_idle && cpu->levels[idx].use_bc_timer) {
		if (tick_broadcast_enter())
			return success;
	}

	state_id = get_cluster_id(cpu->parent, &affinity_level, from_idle);
	power_state = PSCI_POWER_STATE(cpu->levels[idx].is_reset);
	affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
	state_id |= power_state | affinity_level | cpu->levels[idx].psci_id;

	update_debug_pc_event(CPU_ENTER, state_id,
			0xdeaffeed, 0xdeaffeed, from_idle);
	stop_critical_timings();

	success = !arm_cpuidle_suspend(state_id);

	start_critical_timings();
	update_debug_pc_event(CPU_EXIT, state_id,
			success, 0xdeaffeed, from_idle);

	if (from_idle && cpu->levels[idx].use_bc_timer)
		tick_broadcast_exit();

	return success;
}

static int lpm_cpuidle_select(struct cpuidle_driver *drv,
		struct cpuidle_device *dev)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);

	if (!cpu)
		return 0;

	return cpu_power_select(dev, cpu);
}

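/*
 * Record the residency just observed by cpuidle into the per-CPU
 * prediction ring buffer. If the wakeup was caused by the history
 * timer, the interrupted sample was partial, so the residency is
 * added onto the previous slot instead of consuming a new one.
 */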
static void update_history(struct cpuidle_device *dev, int idx)
{
	struct lpm_history *history = &per_cpu(hist, dev->cpu);
	uint32_t tmr = 0;

	if (!lpm_prediction)
		return;

	if (history->htmr_wkup) {
		if (!history->hptr)
			history->hptr = MAXSAMPLES-1;
		else
			history->hptr--;

		history->resi[history->hptr] += dev->last_residency;
		history->htmr_wkup = 0;
		tmr = 1;
	} else
		history->resi[history->hptr] = dev->last_residency;

	history->mode[history->hptr] = idx;

	trace_cpu_pred_hist(history->mode[history->hptr],
		history->resi[history->hptr], history->hptr, tmr);

	if (history->nsamp < MAXSAMPLES)
		history->nsamp++;

	(history->hptr)++;
	if (history->hptr >= MAXSAMPLES)
		history->hptr = 0;
}

static int lpm_cpuidle_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int idx)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
	bool success = false;
	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
	ktime_t start = ktime_get();
	uint64_t start_time = ktime_to_ns(start), end_time;
	struct power_params *pwr_params;

	pwr_params = &cpu->levels[idx].pwr;
	sched_set_cpu_cstate(dev->cpu, idx + 1,
		pwr_params->energy_overhead, pwr_params->latency_us);

	cpu_prepare(cpu, idx, true);
	cluster_prepare(cpu->parent, cpumask, idx, true, start_time);

	trace_cpu_idle_enter(idx);
	lpm_stats_cpu_enter(idx, start_time);

	if (need_resched())
		goto exit;

	success = psci_enter_sleep(cpu, idx, true);

exit:
	end_time = ktime_to_ns(ktime_get());
	lpm_stats_cpu_exit(idx, end_time, success);

	cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
	cpu_unprepare(cpu, idx, true);
	sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
	dev->last_residency = ktime_us_delta(ktime_get(), start);
	update_history(dev, idx);
	trace_cpu_idle_exit(idx, success);
	local_irq_enable();
	if (lpm_prediction) {
		histtimer_cancel();
		clusttimer_cancel();
	}
	return idx;
}

static void lpm_cpuidle_freeze(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int idx)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);

	for (; idx >= 0; idx--) {
		if (lpm_cpu_mode_allow(dev->cpu, idx, false))
			break;
	}
	if (idx < 0) {
		pr_err("Failed suspend\n");
		return;
	}

	cpu_prepare(cpu, idx, true);
	cluster_prepare(cpu->parent, cpumask, idx, false, 0);

	psci_enter_sleep(cpu, idx, false);

	cluster_unprepare(cpu->parent, cpumask, idx, false, 0);
	cpu_unprepare(cpu, idx, true);
}

#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
static int cpuidle_register_cpu(struct cpuidle_driver *drv,
		struct cpumask *mask)
{
	struct cpuidle_device *device;
	int cpu, ret;

	if (!mask || !drv)
		return -EINVAL;

	drv->cpumask = mask;
	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("Failed to register cpuidle driver %d\n", ret);
		goto failed_driver_register;
	}

	for_each_cpu(cpu, mask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

		ret = cpuidle_register_device(device);
		if (ret) {
			pr_err("Failed to register cpuidle driver for cpu:%u\n",
					cpu);
			goto failed_driver_register;
		}
	}
	return ret;
failed_driver_register:
	for_each_cpu(cpu, mask)
		cpuidle_unregister_driver(drv);
	return ret;
}
#else
static int cpuidle_register_cpu(struct cpuidle_driver *drv,
		struct cpumask *mask)
{
	return cpuidle_register(drv, NULL);
}
#endif

static struct cpuidle_governor lpm_governor = {
	.name = "qcom",
	.rating = 30,
	.select = lpm_cpuidle_select,
	.owner = THIS_MODULE,
};

static int cluster_cpuidle_register(struct lpm_cluster *cl)
{
	int i = 0, ret = 0;
	unsigned int cpu;
	struct lpm_cluster *p = NULL;
	struct lpm_cpu *lpm_cpu;

	if (list_empty(&cl->cpu)) {
		struct lpm_cluster *n;

		list_for_each_entry(n, &cl->child, list) {
			ret = cluster_cpuidle_register(n);
			if (ret)
				break;
		}
		return ret;
	}

	list_for_each_entry(lpm_cpu, &cl->cpu, list) {
		lpm_cpu->drv = kcalloc(1, sizeof(*lpm_cpu->drv), GFP_KERNEL);
		if (!lpm_cpu->drv)
			return -ENOMEM;

		lpm_cpu->drv->name = "msm_idle";

		for (i = 0; i < lpm_cpu->nlevels; i++) {
			struct cpuidle_state *st = &lpm_cpu->drv->states[i];
			struct lpm_cpu_level *cpu_level = &lpm_cpu->levels[i];

			snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
			snprintf(st->desc, CPUIDLE_DESC_LEN, "%s",
					cpu_level->name);
			st->flags = 0;
			st->exit_latency = cpu_level->pwr.latency_us;
			st->power_usage = cpu_level->pwr.ss_power;
			st->target_residency = 0;
			st->enter = lpm_cpuidle_enter;
			if (i == lpm_cpu->nlevels - 1)
				st->enter_freeze = lpm_cpuidle_freeze;
		}

		lpm_cpu->drv->state_count = lpm_cpu->nlevels;
		lpm_cpu->drv->safe_state_index = 0;
		for_each_cpu(cpu, &lpm_cpu->related_cpus)
			per_cpu(cpu_lpm, cpu) = lpm_cpu;

		for_each_possible_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			if (per_cpu(cpu_lpm, cpu))
				p = per_cpu(cpu_lpm, cpu)->parent;
			while (p) {
				int j;

				spin_lock(&p->sync_lock);
				cpumask_set_cpu(cpu, &p->num_children_in_sync);
				for (j = 0; j < p->nlevels; j++)
					cpumask_copy(
						&p->levels[j].num_cpu_votes,
						&p->num_children_in_sync);
				spin_unlock(&p->sync_lock);
				p = p->parent;
			}
		}
		ret = cpuidle_register_cpu(lpm_cpu->drv,
					&lpm_cpu->related_cpus);

		if (ret) {
			kfree(lpm_cpu->drv);
			return -ENOMEM;
		}
	}
	return 0;
}

/**
 * init_lpm - initializes the governor
 */
static int __init init_lpm(void)
{
	return cpuidle_register_governor(&lpm_governor);
}

postcore_initcall(init_lpm);

static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
		struct lpm_cluster *parent)
{
	const char **level_name;
	int i;

	level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);

	if (!level_name)
		return;

	for (i = 0; i < cpu->nlevels; i++)
		level_name[i] = cpu->levels[i].name;

	lpm_stats_config_level("cpu", level_name, cpu->nlevels,
			parent->stats, &cpu->related_cpus);

	kfree(level_name);
}

static void register_cluster_lpm_stats(struct lpm_cluster *cl,
		struct lpm_cluster *parent)
{
	const char **level_name;
	struct lpm_cluster *child;
	struct lpm_cpu *cpu;
	int i;

	if (!cl)
		return;

	level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);

	if (!level_name)
		return;

	for (i = 0; i < cl->nlevels; i++)
		level_name[i] = cl->levels[i].level_name;

	cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
			cl->nlevels, parent ? parent->stats : NULL, NULL);

	kfree(level_name);

	list_for_each_entry(cpu, &cl->cpu, list) {
		pr_err("%s()\n", __func__);
		register_cpu_lpm_stats(cpu, cl);
	}
	if (!list_empty(&cl->cpu))
		return;

	list_for_each_entry(child, &cl->child, list)
		register_cluster_lpm_stats(child, cl);
}

static int lpm_suspend_prepare(void)
{
	suspend_in_progress = true;
	lpm_stats_suspend_enter();

	return 0;
}

static void lpm_suspend_wake(void)
{
	suspend_in_progress = false;
	lpm_stats_suspend_exit();
}

static int lpm_suspend_enter(suspend_state_t state)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);
	struct lpm_cluster *cluster = lpm_cpu->parent;
	const struct cpumask *cpumask = get_cpu_mask(cpu);
	int idx;

	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
		if (lpm_cpu_mode_allow(cpu, idx, false))
			break;
	}
	if (idx < 0) {
		pr_err("Failed suspend\n");
		return 0;
	}
	cpu_prepare(lpm_cpu, idx, false);
	cluster_prepare(cluster, cpumask, idx, false, 0);

	psci_enter_sleep(lpm_cpu, idx, false);

	cluster_unprepare(cluster, cpumask, idx, false, 0);
	cpu_unprepare(lpm_cpu, idx, false);
	return 0;
}

static const struct platform_suspend_ops lpm_suspend_ops = {
	.enter = lpm_suspend_enter,
	.valid = suspend_valid_only_mem,
	.prepare_late = lpm_suspend_prepare,
	.wake = lpm_suspend_wake,
};

static const struct platform_freeze_ops lpm_freeze_ops = {
	.prepare = lpm_suspend_prepare,
	.restore = lpm_suspend_wake,
};

static int lpm_probe(struct platform_device *pdev)
{
	int ret;
	int size;
	struct kobject *module_kobj = NULL;
	struct md_region md_entry;

	get_online_cpus();
	lpm_root_node = lpm_of_parse_cluster(pdev);

	if (IS_ERR_OR_NULL(lpm_root_node)) {
		pr_err("Failed to probe low power modes\n");
		put_online_cpus();
		return PTR_ERR(lpm_root_node);
	}

	if (print_parsed_dt)
		cluster_dt_walkthrough(lpm_root_node);

	/*
	 * Register the hotplug notifier before broadcast timer setup to
	 * prevent a race where a broadcast timer might not be set up for
	 * a core. This is a latent bug in the existing code, but no issues
	 * have been observed, possibly because of how late lpm_levels gets
	 * initialized.
	 */
	suspend_set_ops(&lpm_suspend_ops);
	freeze_set_ops(&lpm_freeze_ops);
	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cluster_timer_init(lpm_root_node);

	size = num_dbg_elements * sizeof(struct lpm_debug);
	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
			&lpm_debug_phys, GFP_KERNEL);

	register_cluster_lpm_stats(lpm_root_node, NULL);

	ret = cluster_cpuidle_register(lpm_root_node);
	put_online_cpus();
	if (ret) {
		pr_err("Failed to register with cpuidle framework\n");
		goto failed;
	}

	ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
			"AP_QCOM_SLEEP_STARTING",
			lpm_starting_cpu, lpm_dying_cpu);
	if (ret)
		goto failed;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
		ret = -ENOENT;
		goto failed;
	}

	ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
	if (ret) {
		pr_err("Failed to create cluster level nodes\n");
		goto failed;
	}

	/* Add lpm_debug to Minidump */
	strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
	md_entry.virt_addr = (uintptr_t)lpm_debug;
	md_entry.phys_addr = lpm_debug_phys;
	md_entry.size = size;
	if (msm_minidump_add_region(&md_entry))
		pr_info("Failed to add lpm_debug in Minidump\n");

	return 0;
failed:
	free_cluster_node(lpm_root_node);
	lpm_root_node = NULL;
	return ret;
}

static const struct of_device_id lpm_mtch_tbl[] = {
	{.compatible = "qcom,lpm-levels"},
	{},
};

static struct platform_driver lpm_driver = {
	.probe = lpm_probe,
	.driver = {
		.name = "lpm-levels",
		.owner = THIS_MODULE,
		.of_match_table = lpm_mtch_tbl,
	},
};

static int __init lpm_levels_module_init(void)
{
	int rc;

#ifdef CONFIG_ARM
	int cpu;

	for_each_possible_cpu(cpu) {
		rc = arm_cpuidle_init(cpu);
		if (rc) {
			pr_err("CPU%d ARM CPUidle init failed (%d)\n", cpu, rc);
			return rc;
		}
	}
#endif

	rc = platform_driver_register(&lpm_driver);
	if (rc) {
		pr_info("Error registering %s\n", lpm_driver.driver.name);
		goto fail;
	}

fail:
	return rc;
}
late_initcall(lpm_levels_module_init);