/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/tick.h>
#include <linux/suspend.h>
#include <linux/pm_qos.h>
#include <linux/of_platform.h>
#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/cpu_pm.h>
#include <linux/cpuhotplug.h>
#include <soc/qcom/pm.h>
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm_levels.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/minidump.h>
#include <asm/arch_timer.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
#include "lpm-levels.h"
#include <trace/events/power.h>
#if defined(CONFIG_COMMON_CLK)
#include "../clk/clk.h"
#elif defined(CONFIG_COMMON_CLK_MSM)
#include "../../drivers/clk/msm/clock.h"
#endif /* CONFIG_COMMON_CLK */
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_low_power.h>

#define SCLK_HZ (32768)
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
#define BIAS_HYST (bias_hyst * NSEC_PER_MSEC)

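/*
 * Note on the PSCI helpers above: as used by this driver, bit 30 of the
 * composed state id distinguishes power-down ("reset") levels from
 * standby levels, and bits [25:24] carry the affinity level the state
 * applies to. The per-level psci_id values are added on top of these
 * when building the parameter for arm_cpuidle_suspend(); see
 * get_cluster_id() and psci_enter_sleep() below.
 */
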
enum {
	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};

enum debug_event {
	CPU_ENTER,
	CPU_EXIT,
	CLUSTER_ENTER,
	CLUSTER_EXIT,
	CPU_HP_STARTING,
	CPU_HP_DYING,
};

struct lpm_debug {
	cycle_t time;
	enum debug_event evt;
	int cpu;
	uint32_t arg1;
	uint32_t arg2;
	uint32_t arg3;
	uint32_t arg4;
};

static struct system_pm_ops *sys_pm_ops;
static DEFINE_SPINLOCK(bc_timer_lock);

struct lpm_cluster *lpm_root_node;

#define MAXSAMPLES 5

static bool lpm_prediction = true;
module_param_named(lpm_prediction, lpm_prediction, bool, 0664);

static uint32_t bias_hyst;
module_param_named(bias_hyst, bias_hyst, uint, 0664);

struct lpm_history {
	uint32_t resi[MAXSAMPLES];
	int mode[MAXSAMPLES];
	int nsamp;
	uint32_t hptr;
	uint32_t hinvalid;
	uint32_t htmr_wkup;
	int64_t stime;
};

static DEFINE_PER_CPU(struct lpm_history, hist);

static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static DEFINE_PER_CPU(struct hrtimer, histtimer);
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;

static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time, bool success);
static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time);

static bool print_parsed_dt;
module_param_named(print_parsed_dt, print_parsed_dt, bool, 0664);

static bool sleep_disabled;
module_param_named(sleep_disabled, sleep_disabled, bool, 0664);

/**
 * msm_cpuidle_get_deep_idle_latency - Get deep idle latency value
 *
 * Returns an s32 latency value
 */
s32 msm_cpuidle_get_deep_idle_latency(void)
{
	return 10;
}
EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);

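/**
 * register_system_pm_ops - install system sleep callbacks
 * @pm_ops: callbacks invoked around system-wide low power modes
 *
 * Only a single set of callbacks can be installed; returns -EUSERS
 * if one is already registered.
 */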
uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
{
	if (sys_pm_ops)
		return -EUSERS;

	sys_pm_ops = pm_ops;

	return 0;
}

static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
		struct latency_level *lat_level)
{
	struct list_head *list;
	struct lpm_cluster_level *level;
	struct lpm_cluster *n;
	struct power_params *pwr_params;
	uint32_t latency = 0;
	int i;

	if (list_empty(&cluster->list)) {
		for (i = 0; i < cluster->nlevels; i++) {
			level = &cluster->levels[i];
			pwr_params = &level->pwr;
			if (lat_level->reset_level == level->reset_level) {
				if ((latency > pwr_params->latency_us)
						|| (!latency))
					latency = pwr_params->latency_us;
				break;
			}
		}
	} else {
		list_for_each(list, &cluster->parent->child) {
			n = list_entry(list, typeof(*n), list);
			if (lat_level->level_name) {
				if (strcmp(lat_level->level_name,
						n->cluster_name))
					continue;
			}
			for (i = 0; i < n->nlevels; i++) {
				level = &n->levels[i];
				pwr_params = &level->pwr;
				if (lat_level->reset_level ==
						level->reset_level) {
					if ((latency > pwr_params->latency_us)
							|| (!latency))
						latency =
						pwr_params->latency_us;
					break;
				}
			}
		}
	}
	return latency;
}

static uint32_t least_cpu_latency(struct list_head *child,
		struct latency_level *lat_level)
{
	struct list_head *list;
	struct lpm_cpu_level *level;
	struct power_params *pwr_params;
	struct lpm_cpu *cpu;
	struct lpm_cluster *n;
	uint32_t lat = 0;
	int i;

	list_for_each(list, child) {
		n = list_entry(list, typeof(*n), list);
		if (lat_level->level_name) {
			if (strcmp(lat_level->level_name, n->cluster_name))
				continue;
		}
		list_for_each_entry(cpu, &n->cpu, list) {
			for (i = 0; i < cpu->nlevels; i++) {
				level = &cpu->levels[i];
				pwr_params = &level->pwr;
				if (lat_level->reset_level
						== level->reset_level) {
					if ((lat > pwr_params->latency_us)
							|| (!lat))
						lat = pwr_params->latency_us;
					break;
				}
			}
		}
	}
	return lat;
}

static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
		int affinity_level)
{
	struct lpm_cluster *n;

	if ((cluster->aff_level == affinity_level)
		|| ((!list_empty(&cluster->cpu)) && (affinity_level == 0)))
		return cluster;
	else if (list_empty(&cluster->cpu)) {
		n = list_entry(cluster->child.next, typeof(*n), list);
		return cluster_aff_match(n, affinity_level);
	} else
		return NULL;
}

int lpm_get_latency(struct latency_level *level, uint32_t *latency)
{
	struct lpm_cluster *cluster;
	uint32_t val;

	if (!lpm_root_node) {
		pr_err("lpm_probe not completed\n");
		return -EAGAIN;
	}

	if ((level->affinity_level < 0)
			|| (level->affinity_level > lpm_root_node->aff_level)
			|| (level->reset_level < LPM_RESET_LVL_RET)
			|| (level->reset_level > LPM_RESET_LVL_PC)
			|| !latency)
		return -EINVAL;

	cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
	if (!cluster) {
		pr_err("No matching cluster found for affinity_level:%d\n",
				level->affinity_level);
		return -EINVAL;
	}

	if (level->affinity_level == 0)
		val = least_cpu_latency(&cluster->parent->child, level);
	else
		val = least_cluster_latency(cluster, level);

	if (!val) {
		pr_err("No mode with affinity_level:%d reset_level:%d\n",
				level->affinity_level, level->reset_level);
		return -EINVAL;
	}

	*latency = val;

	return 0;
}
EXPORT_SYMBOL(lpm_get_latency);

static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
		uint32_t arg2, uint32_t arg3, uint32_t arg4)
{
	struct lpm_debug *dbg;
	int idx;
	static DEFINE_SPINLOCK(debug_lock);
	static int pc_event_index;

	if (!lpm_debug)
		return;

	spin_lock(&debug_lock);
	idx = pc_event_index++;
	dbg = &lpm_debug[idx & (num_dbg_elements - 1)];

	dbg->evt = event;
	dbg->time = arch_counter_get_cntvct();
	dbg->cpu = raw_smp_processor_id();
	dbg->arg1 = arg1;
	dbg->arg2 = arg2;
	dbg->arg3 = arg3;
	dbg->arg4 = arg4;
	spin_unlock(&debug_lock);
}

static int lpm_dying_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	update_debug_pc_event(CPU_HP_DYING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}

static int lpm_starting_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	update_debug_pc_event(CPU_HP_STARTING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false,
			0, true);
	return 0;
}

static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
{
	return HRTIMER_NORESTART;
}

static void histtimer_cancel(void)
{
	unsigned int cpu = raw_smp_processor_id();
	struct hrtimer *cpu_histtimer = &per_cpu(histtimer, cpu);
	ktime_t time_rem;

	time_rem = hrtimer_get_remaining(cpu_histtimer);
	if (ktime_to_us(time_rem) <= 0)
		return;

	hrtimer_try_to_cancel(cpu_histtimer);
}

static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
{
	int cpu = raw_smp_processor_id();
	struct lpm_history *history = &per_cpu(hist, cpu);

	history->hinvalid = 1;
	return HRTIMER_NORESTART;
}

static void histtimer_start(uint32_t time_us)
{
	uint64_t time_ns = time_us * NSEC_PER_USEC;
	ktime_t hist_ktime = ns_to_ktime(time_ns);
	unsigned int cpu = raw_smp_processor_id();
	struct hrtimer *cpu_histtimer = &per_cpu(histtimer, cpu);

	cpu_histtimer->function = histtimer_fn;
	hrtimer_start(cpu_histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
}

static void cluster_timer_init(struct lpm_cluster *cluster)
{
	struct list_head *list;

	if (!cluster)
		return;

	hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		n = list_entry(list, typeof(*n), list);
		cluster_timer_init(n);
	}
}

static void clusttimer_cancel(void)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
	ktime_t time_rem;

	time_rem = hrtimer_get_remaining(&cluster->histtimer);
	if (ktime_to_us(time_rem) > 0)
		hrtimer_try_to_cancel(&cluster->histtimer);

	if (cluster->parent) {
		time_rem = hrtimer_get_remaining(
				&cluster->parent->histtimer);

		if (ktime_to_us(time_rem) <= 0)
			return;

		hrtimer_try_to_cancel(&cluster->parent->histtimer);
	}
}

static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
{
	struct lpm_cluster *cluster = container_of(h,
			struct lpm_cluster, histtimer);

	cluster->history.hinvalid = 1;
	return HRTIMER_NORESTART;
}

static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
{
	uint64_t time_ns = time_us * NSEC_PER_USEC;
	ktime_t clust_ktime = ns_to_ktime(time_ns);

	cluster->histtimer.function = clusttimer_fn;
	hrtimer_start(&cluster->histtimer, clust_ktime,
			HRTIMER_MODE_REL_PINNED);
}

static void msm_pm_set_timer(uint32_t modified_time_us)
{
	u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
	ktime_t modified_ktime = ns_to_ktime(modified_time_ns);

	lpm_hrtimer.function = lpm_hrtimer_cb;
	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
}

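/**
 * lpm_cpuidle_predict - predict this CPU's next sleep duration
 * @dev: cpuidle device for this CPU
 * @cpu: LPM CPU descriptor
 * @idx_restrict: out, deepest mode index allowed on repeated early exits
 * @idx_restrict_time: out, expected residency when a restriction applies
 *
 * Returns the average of the last MAXSAMPLES residencies when they are
 * tightly clustered (deviation within ref_stddev); otherwise returns 0,
 * and when some mode shows repeated premature exits, restricts selection
 * to shallower modes through @idx_restrict.
 */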
static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
		struct lpm_cpu *cpu, int *idx_restrict,
		uint32_t *idx_restrict_time)
{
	int i, j, divisor;
	uint64_t max, avg, stddev;
	int64_t thresh = LLONG_MAX;
	struct lpm_history *history = &per_cpu(hist, dev->cpu);
	uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
	uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);

	if (!lpm_prediction || !cpu->lpm_prediction)
		return 0;

	/*
	 * Samples are marked invalid when woken up due to timer,
	 * so do not predict.
	 */
	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->stime = 0;
		return 0;
	}

	/*
	 * Predict only when all the samples are collected.
	 */
	if (history->nsamp < MAXSAMPLES) {
		history->stime = 0;
		return 0;
	}

	/*
	 * Check whether the samples deviate much; if not, use their
	 * average as the predicted sleep time. Otherwise, if any
	 * specific mode has too many premature exits, return the
	 * index of that mode.
	 */

again:
	max = avg = divisor = stddev = 0;
	for (i = 0; i < MAXSAMPLES; i++) {
		int64_t value = history->resi[i];

		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

	for (i = 0; i < MAXSAMPLES; i++) {
		int64_t value = history->resi[i];

		if (value <= thresh) {
			int64_t diff = value - avg;

			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);
	stddev = int_sqrt(stddev);

	/*
	 * If the deviation is small, return the average; else
	 * ignore one maximum sample and retry.
	 */
	if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
					|| stddev <= cpu->ref_stddev) {
		history->stime = ktime_to_us(ktime_get()) + avg;
		return avg;
	} else if (divisor > (MAXSAMPLES - 1)) {
		thresh = max - 1;
		goto again;
	}

	/*
	 * Find the number of premature exits for each mode, excluding
	 * the clock-gating mode; if a mode has at least the reference
	 * number of premature exits, restrict that mode and the
	 * deeper ones.
	 */
	if (history->htmr_wkup != 1) {
		for (j = 1; j < cpu->nlevels; j++) {
			uint32_t failed = 0;
			uint64_t total = 0;

			for (i = 0; i < MAXSAMPLES; i++) {
				if ((history->mode[i] == j) &&
					(history->resi[i] < min_residency[j])) {
					failed++;
					total += history->resi[i];
				}
			}
			if (failed >= cpu->ref_premature_cnt) {
				*idx_restrict = j;
				do_div(total, failed);
				for (i = 0; i < j; i++) {
					if (total < max_residency[i]) {
						*idx_restrict = i+1;
						total = max_residency[i];
						break;
					}
				}

				*idx_restrict_time = total;
				history->stime = ktime_to_us(ktime_get())
						+ *idx_restrict_time;
				break;
			}
		}
	}
	return 0;
}

static inline void invalidate_predict_history(struct cpuidle_device *dev)
{
	struct lpm_history *history = &per_cpu(hist, dev->cpu);
	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, dev->cpu);

	if (!lpm_prediction || !lpm_cpu->lpm_prediction)
		return;

	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->stime = 0;
	}
}

static void clear_predict_history(void)
{
	struct lpm_history *history;
	int i;
	unsigned int cpu;
	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, raw_smp_processor_id());

	if (!lpm_prediction || !lpm_cpu->lpm_prediction)
		return;

	for_each_possible_cpu(cpu) {
		history = &per_cpu(hist, cpu);
		for (i = 0; i < MAXSAMPLES; i++) {
			history->resi[i] = 0;
			history->mode[i] = -1;
			history->hptr = 0;
			history->nsamp = 0;
			history->stime = 0;
		}
	}
}

static void update_history(struct cpuidle_device *dev, int idx);

static inline bool is_cpu_biased(int cpu)
{
	u64 now = sched_clock();
	u64 last = sched_get_cpu_last_busy_time(cpu);

	if (!last)
		return false;

	return (now - last) < BIAS_HYST;
}

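/**
 * cpu_power_select - choose a low power mode for this CPU
 * @dev: cpuidle device
 * @cpu: LPM CPU descriptor
 *
 * Picks the deepest enabled level whose entry latency fits within the
 * PM QoS bound and whose residency window covers the expected (or
 * predicted) sleep time. Recently busy CPUs are biased toward clock
 * gating unless isolated.
 */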
static int cpu_power_select(struct cpuidle_device *dev,
		struct lpm_cpu *cpu)
{
	int best_level = 0;
	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
							dev->cpu);
	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
	uint32_t modified_time_us = 0;
	uint32_t next_event_us = 0;
	int i, idx_restrict;
	uint32_t lvl_latency_us = 0;
	uint64_t predicted = 0;
	uint32_t htime = 0, idx_restrict_time = 0;
	uint32_t next_wakeup_us = (uint32_t)sleep_us;
	uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
	uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);

	if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
		return best_level;

	idx_restrict = cpu->nlevels + 1;

	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));

	if (is_cpu_biased(dev->cpu) && (!cpu_isolated(dev->cpu)))
		goto done_select;

	for (i = 0; i < cpu->nlevels; i++) {
		struct lpm_cpu_level *level = &cpu->levels[i];
		struct power_params *pwr_params = &level->pwr;
		bool allow;

		allow = i ? lpm_cpu_mode_allow(dev->cpu, i, true) : true;

		if (!allow)
			continue;

		lvl_latency_us = pwr_params->latency_us;

		if (latency_us < lvl_latency_us)
			break;

		if (next_event_us) {
			if (next_event_us < lvl_latency_us)
				break;

			if (((next_event_us - lvl_latency_us) < sleep_us) ||
					(next_event_us < sleep_us))
				next_wakeup_us = next_event_us - lvl_latency_us;
		}

		if (!i && !cpu_isolated(dev->cpu)) {
			/*
			 * If next_wakeup_us itself is not sufficient for
			 * low power modes deeper than clock gating, do
			 * not call prediction.
			 */
			if (next_wakeup_us > max_residency[i]) {
				predicted = lpm_cpuidle_predict(dev, cpu,
					&idx_restrict, &idx_restrict_time);
				if (predicted && (predicted < min_residency[i]))
					predicted = min_residency[i];
			} else
				invalidate_predict_history(dev);
		}

		if (i >= idx_restrict)
			break;

		best_level = i;

		if (next_event_us && next_event_us < sleep_us && !i)
			modified_time_us = next_event_us - lvl_latency_us;
		else
			modified_time_us = 0;

		if (predicted ? (predicted <= max_residency[i])
			: (next_wakeup_us <= max_residency[i]))
			break;
	}

	if (modified_time_us)
		msm_pm_set_timer(modified_time_us);

	/*
	 * Start a timer to avoid staying in a shallower mode forever
	 * in case of misprediction.
	 */
	if ((predicted || (idx_restrict != (cpu->nlevels + 1)))
			&& ((best_level >= 0)
			&& (best_level < (cpu->nlevels-1)))) {
		htime = predicted + cpu->tmr_add;
		if (htime == cpu->tmr_add)
			htime = idx_restrict_time;
		else if (htime > max_residency[best_level])
			htime = max_residency[best_level];

		if ((next_wakeup_us > htime) &&
			((next_wakeup_us - htime) > max_residency[best_level]))
			histtimer_start(htime);
	}

done_select:
	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);

	trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
			predicted, htime);

	return best_level;
}

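/**
 * get_next_online_cpu - find the online CPU with the earliest wakeup
 * @from_idle: true when called from the idle path
 *
 * Used to pick the CPU whose next timer event should program the
 * system wakeup; returns the calling CPU when not entering from idle.
 */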
static unsigned int get_next_online_cpu(bool from_idle)
{
	unsigned int cpu;
	ktime_t next_event;
	unsigned int next_cpu = raw_smp_processor_id();

	if (!from_idle)
		return next_cpu;
	next_event.tv64 = KTIME_MAX;
	for_each_online_cpu(cpu) {
		ktime_t *next_event_c;

		next_event_c = get_next_event_cpu(cpu);
		if (next_event_c->tv64 < next_event.tv64) {
			next_event.tv64 = next_event_c->tv64;
			next_cpu = cpu;
		}
	}
	return next_cpu;
}

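/**
 * get_cluster_sleep_time - time until the cluster's next wakeup
 * @cluster: cluster to evaluate
 * @from_idle: true when called from the idle path
 * @pred_time: out, earliest predicted wakeup among the cluster's CPUs
 *
 * Scans the next timer event of every online CPU in the cluster and
 * returns the shortest interval from now; ~0ULL when not from idle.
 */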
static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
		bool from_idle, uint32_t *pred_time)
{
	int cpu;
	ktime_t next_event;
	struct cpumask online_cpus_in_cluster;
	struct lpm_history *history;
	int64_t prediction = LONG_MAX;

	if (!from_idle)
		return ~0ULL;

	next_event.tv64 = KTIME_MAX;
	cpumask_and(&online_cpus_in_cluster,
			&cluster->num_children_in_sync, cpu_online_mask);

	for_each_cpu(cpu, &online_cpus_in_cluster) {
		ktime_t *next_event_c;

		next_event_c = get_next_event_cpu(cpu);
		if (next_event_c->tv64 < next_event.tv64) {
			next_event.tv64 = next_event_c->tv64;
		}

		if (from_idle && lpm_prediction && cluster->lpm_prediction) {
			history = &per_cpu(hist, cpu);
			if (history->stime && (history->stime < prediction))
				prediction = history->stime;
		}
	}

	if (from_idle && lpm_prediction && cluster->lpm_prediction) {
		if (prediction > ktime_to_us(ktime_get()))
			*pred_time = prediction - ktime_to_us(ktime_get());
	}

	if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
		return ktime_to_us(ktime_sub(next_event, ktime_get()));
	else
		return 0;
}

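/**
 * cluster_predict - cluster-level sleep prediction
 * @cluster: cluster to evaluate
 * @pred_us: out, predicted sleep time in microseconds
 *
 * Returns 0 when no prediction is available, 1 when a level shows
 * mostly premature exits (the prediction is the average failed
 * residency), and 2 when the running average of all samples is used.
 */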
static int cluster_predict(struct lpm_cluster *cluster,
				uint32_t *pred_us)
{
	int i, j;
	int ret = 0;
	struct cluster_history *history = &cluster->history;
	int64_t cur_time = ktime_to_us(ktime_get());

	if (!lpm_prediction || !cluster->lpm_prediction)
		return 0;

	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->flag = 0;
		return ret;
	}

	if (history->nsamp == MAXSAMPLES) {
		for (i = 0; i < MAXSAMPLES; i++) {
			if ((cur_time - history->stime[i])
					> CLUST_SMPL_INVLD_TIME)
				history->nsamp--;
		}
	}

	if (history->nsamp < MAXSAMPLES) {
		history->flag = 0;
		return ret;
	}

	if (history->flag == 2)
		history->flag = 0;

	if (history->htmr_wkup != 1) {
		uint64_t total = 0;

		if (history->flag == 1) {
			for (i = 0; i < MAXSAMPLES; i++)
				total += history->resi[i];
			do_div(total, MAXSAMPLES);
			*pred_us = total;
			return 2;
		}

		for (j = 1; j < cluster->nlevels; j++) {
			uint32_t failed = 0;

			total = 0;
			for (i = 0; i < MAXSAMPLES; i++) {
				if ((history->mode[i] == j) && (history->resi[i]
				< cluster->levels[j].pwr.min_residency)) {
					failed++;
					total += history->resi[i];
				}
			}

			if (failed > (MAXSAMPLES-2)) {
				do_div(total, failed);
				*pred_us = total;
				history->flag = 1;
				return 1;
			}
		}
	}

	return ret;
}

static void update_cluster_history_time(struct cluster_history *history,
		int idx, uint64_t start)
{
	history->entry_idx = idx;
	history->entry_time = start;
}

static void update_cluster_history(struct cluster_history *history, int idx)
{
	uint32_t tmr = 0;
	uint32_t residency = 0;
	struct lpm_cluster *cluster =
		container_of(history, struct lpm_cluster, history);

	if (!lpm_prediction || !cluster->lpm_prediction)
		return;

	if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
		residency = ktime_to_us(ktime_get()) - history->entry_time;
		history->stime[history->hptr] = history->entry_time;
	} else
		return;

	if (history->htmr_wkup) {
		if (!history->hptr)
			history->hptr = MAXSAMPLES-1;
		else
			history->hptr--;

		history->resi[history->hptr] += residency;

		history->htmr_wkup = 0;
		tmr = 1;
	} else
		history->resi[history->hptr] = residency;

	history->mode[history->hptr] = idx;

	history->entry_idx = INT_MIN;
	history->entry_time = 0;

	if (history->nsamp < MAXSAMPLES)
		history->nsamp++;

	trace_cluster_pred_hist(cluster->cluster_name,
		history->mode[history->hptr], history->resi[history->hptr],
		history->hptr, tmr);

	(history->hptr)++;

	if (history->hptr >= MAXSAMPLES)
		history->hptr = 0;
}

static void clear_cl_history_each(struct cluster_history *history)
{
	int i;

	for (i = 0; i < MAXSAMPLES; i++) {
		history->resi[i] = 0;
		history->mode[i] = -1;
		history->stime[i] = 0;
	}

	history->hptr = 0;
	history->nsamp = 0;
	history->flag = 0;
	history->hinvalid = 0;
	history->htmr_wkup = 0;
}

static void clear_cl_predict_history(void)
{
	struct lpm_cluster *cluster = lpm_root_node;
	struct list_head *list;

	if (!lpm_prediction || !cluster->lpm_prediction)
		return;

	clear_cl_history_each(&cluster->history);

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		n = list_entry(list, typeof(*n), list);
		clear_cl_history_each(&n->history);
	}
}

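/**
 * cluster_select - choose a low power mode for the cluster
 * @cluster: cluster to configure
 * @from_idle: true when called from the idle path
 * @ispred: out, whether the choice was driven by prediction
 *
 * Selects the deepest allowed level for which every child has voted,
 * the aggregated PM QoS latency bound holds, and the expected (or
 * predicted) sleep time exceeds the level's overhead.
 */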
static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
							int *ispred)
{
	int best_level = -1;
	int i;
	struct cpumask mask;
	uint32_t latency_us = ~0U;
	uint32_t sleep_us;
	uint32_t cpupred_us = 0, pred_us = 0;
	int pred_mode = 0, predicted = 0;

	if (!cluster)
		return -EINVAL;

	sleep_us = (uint32_t)get_cluster_sleep_time(cluster,
						from_idle, &cpupred_us);

	if (from_idle) {
		pred_mode = cluster_predict(cluster, &pred_us);

		if (cpupred_us && pred_mode && (cpupred_us < pred_us))
			pred_us = cpupred_us;

		if (pred_us && pred_mode && (pred_us < sleep_us))
			predicted = 1;

		if (predicted && (pred_us == cpupred_us))
			predicted = 2;
	}

	if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
		latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
							&mask);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *level = &cluster->levels[i];
		struct power_params *pwr_params = &level->pwr;

		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
			continue;

		if (!cpumask_equal(&cluster->num_children_in_sync,
					&level->num_cpu_votes))
			continue;

		if (from_idle && latency_us < pwr_params->latency_us)
			break;

		if (sleep_us < pwr_params->time_overhead_us)
			break;

		if (suspend_in_progress && from_idle && level->notify_rpm)
			continue;

		if (level->notify_rpm) {
			if (!(sys_pm_ops && sys_pm_ops->sleep_allowed))
				continue;
			if (!sys_pm_ops->sleep_allowed())
				continue;
		}

		best_level = i;

		if (from_idle &&
			(predicted ? (pred_us <= pwr_params->max_residency)
			: (sleep_us <= pwr_params->max_residency)))
			break;
	}

	if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
		cluster->history.flag = 2;

	*ispred = predicted;

	trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
					latency_us, predicted, pred_us);

	return best_level;
}

static void cluster_notify(struct lpm_cluster *cluster,
		struct lpm_cluster_level *level, bool enter)
{
	if (level->is_reset && enter)
		cpu_cluster_pm_enter(cluster->aff_level);
	else if (level->is_reset && !enter)
		cpu_cluster_pm_exit(cluster->aff_level);
}

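/**
 * cluster_configure - program the selected cluster level
 * @cluster: cluster to configure
 * @idx: index of the selected level
 * @from_idle: true when called from the idle path
 * @predicted: whether the level was chosen by prediction
 *
 * Fails with -EPERM if the cluster is not fully in sync or an IPI is
 * pending, and with -EBUSY if the system pm enter callback fails.
 */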
static int cluster_configure(struct lpm_cluster *cluster, int idx,
		bool from_idle, int predicted)
{
	struct lpm_cluster_level *level = &cluster->levels[idx];
	struct cpumask online_cpus, cpumask;
	unsigned int cpu;
	int ret = 0;

	cpumask_and(&online_cpus, &cluster->num_children_in_sync,
					cpu_online_mask);

	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
			|| is_IPI_pending(&online_cpus)) {
		return -EPERM;
	}

	if (idx != cluster->default_level) {
		update_debug_pc_event(CLUSTER_ENTER, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		trace_cluster_enter(cluster->cluster_name, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		lpm_stats_cluster_enter(cluster->stats, idx);

		if (from_idle && lpm_prediction && cluster->lpm_prediction)
			update_cluster_history_time(&cluster->history, idx,
						ktime_to_us(ktime_get()));
	}

	if (level->notify_rpm) {
		/*
		 * Print the clocks which are enabled during system suspend.
		 * This debug information is useful to know which clocks
		 * are enabled and preventing the system level LPMs
		 * (XO and Vmin).
		 */
		if (!from_idle)
			clock_debug_print_enabled(true);

		cpu = get_next_online_cpu(from_idle);
		cpumask_copy(&cpumask, cpumask_of(cpu));

		clear_predict_history();
		clear_cl_predict_history();
		if (sys_pm_ops && sys_pm_ops->enter) {
			spin_lock(&bc_timer_lock);
			ret = sys_pm_ops->enter(&cpumask);
			spin_unlock(&bc_timer_lock);
			if (ret)
				return -EBUSY;
		}
	}
	/* Notify cluster enter event after successful config completion */
	cluster_notify(cluster, level, true);

	cluster->last_level = idx;

	if (predicted && (idx < (cluster->nlevels - 1))) {
		struct power_params *pwr_params = &cluster->levels[idx].pwr;

		clusttimer_start(cluster, pwr_params->max_residency +
						cluster->tmr_add);
	}

	return 0;
}

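/**
 * cluster_prepare - vote for a cluster low power mode
 * @cluster: cluster the caller belongs to
 * @cpu: cpumask of the voting CPU(s)
 * @child_idx: low power mode index chosen at the child level
 * @from_idle: true when called from the idle path
 * @start_time: sleep start time, for statistics
 *
 * Records the vote and, once the last CPU of the cluster has voted,
 * selects and configures a cluster level, recursing to the parent.
 */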
static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t start_time)
{
	int i;
	int predicted = 0;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	cpumask_or(&cluster->num_children_in_sync, cpu,
			&cluster->num_children_in_sync);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_or(&lvl->num_cpu_votes, cpu,
					&lvl->num_cpu_votes);
	}

	/*
	 * cluster_select() does not make any configuration changes, so it
	 * is OK to release the lock here. If a core wakes up for a rude
	 * request, it need not wait for another to finish its cluster
	 * selection and configuration process.
	 */

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto failed;

	i = cluster_select(cluster, from_idle, &predicted);

	if (((i < 0) || (i == cluster->default_level))
				&& predicted && from_idle) {
		update_cluster_history_time(&cluster->history,
					-1, ktime_to_us(ktime_get()));

		if (i < 0) {
			struct power_params *pwr_params =
						&cluster->levels[0].pwr;

			clusttimer_start(cluster,
					pwr_params->max_residency +
					cluster->tmr_add);

			goto failed;
		}
	}

	if (i < 0)
		goto failed;

	if (cluster_configure(cluster, i, from_idle, predicted))
		goto failed;

	cluster->stats->sleep_time = start_time;
	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
			from_idle, start_time);

	spin_unlock(&cluster->sync_lock);
	return;
failed:
	spin_unlock(&cluster->sync_lock);
	cluster->stats->sleep_time = 0;
}

static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t end_time, bool success)
{
	struct lpm_cluster_level *level;
	bool first_cpu;
	int last_level, i;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	last_level = cluster->default_level;
	first_cpu = cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus);
	cpumask_andnot(&cluster->num_children_in_sync,
			&cluster->num_children_in_sync, cpu);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_andnot(&lvl->num_cpu_votes,
					&lvl->num_cpu_votes, cpu);
	}

	if (from_idle && first_cpu &&
		(cluster->last_level == cluster->default_level))
		update_cluster_history(&cluster->history, cluster->last_level);

	if (!first_cpu || cluster->last_level == cluster->default_level)
		goto unlock_return;

	if (cluster->stats->sleep_time)
		cluster->stats->sleep_time = end_time -
			cluster->stats->sleep_time;
	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, success);

	level = &cluster->levels[cluster->last_level];

	if (level->notify_rpm)
		if (sys_pm_ops && sys_pm_ops->exit) {
			spin_lock(&bc_timer_lock);
			sys_pm_ops->exit(success);
			spin_unlock(&bc_timer_lock);
		}

	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);

	last_level = cluster->last_level;
	cluster->last_level = cluster->default_level;

	cluster_notify(cluster, &cluster->levels[last_level], false);

	if (from_idle)
		update_cluster_history(&cluster->history, last_level);

	cluster_unprepare(cluster->parent, &cluster->child_cpus,
			last_level, from_idle, end_time, success);
unlock_return:
	spin_unlock(&cluster->sync_lock);
}

static inline void cpu_prepare(struct lpm_cpu *cpu, int cpu_index,
				bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];

	/*
	 * Use the broadcast timer for aggregating sleep modes within a
	 * cluster. A broadcast timer could be used in the following
	 * scenarios:
	 * 1) The architected timer HW gets reset during certain low power
	 * modes and the core relies on an external (broadcast) timer to
	 * wake up from sleep. This information is passed through the
	 * device tree.
	 * 2) The CPU low power mode could trigger a system low power mode.
	 * The low power module relies on the broadcast timer to aggregate
	 * the next wakeup within a cluster, in which case the CPU switches
	 * over to using the broadcast timer.
	 */

	if (from_idle && cpu_level->is_reset)
		cpu_pm_enter();
}

static inline void cpu_unprepare(struct lpm_cpu *cpu, int cpu_index,
				bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];

	if (from_idle && cpu_level->is_reset)
		cpu_pm_exit();
}

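/**
 * get_cluster_id - compose the PSCI state id for the cluster hierarchy
 * @cluster: cluster to start from
 * @aff_lvl: out, incremented for each cluster level entering sleep
 * @from_idle: true when called from the idle path
 *
 * Walks up the cluster hierarchy, accumulating each synced cluster's
 * psci_id bits, and lets the system pm ops refresh the broadcast timer
 * wakeup when an RPM-notifying level is active.
 */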
static int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl,
				bool from_idle)
{
	int state_id = 0;

	if (!cluster)
		return 0;

	spin_lock(&cluster->sync_lock);

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto unlock_and_return;

	state_id += get_cluster_id(cluster->parent, aff_lvl, from_idle);

	if (cluster->last_level != cluster->default_level) {
		struct lpm_cluster_level *level
			= &cluster->levels[cluster->last_level];

		state_id += (level->psci_id & cluster->psci_mode_mask)
					<< cluster->psci_mode_shift;

		/*
		 * We may have updated the broadcast timers; update
		 * the wakeup value by reading the bc timer directly.
		 */
		if (level->notify_rpm)
			if (sys_pm_ops && sys_pm_ops->update_wakeup)
				sys_pm_ops->update_wakeup(from_idle);
		if (cluster->psci_mode_shift)
			(*aff_lvl)++;
	}
unlock_and_return:
	spin_unlock(&cluster->sync_lock);
	return state_id;
}

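/**
 * psci_enter_sleep - enter the chosen low power mode via PSCI
 * @cpu: LPM CPU descriptor
 * @idx: index of the chosen CPU level
 * @from_idle: true when called from the idle path
 *
 * For idx 0 simply clock gates with cpu_do_idle(); otherwise builds
 * the composite state id and calls arm_cpuidle_suspend(). Returns
 * true when the mode was entered and exited successfully.
 */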
static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
{
	int affinity_level = 0, state_id = 0, power_state = 0;
	bool success = false;
	int ret = 0;

	/*
	 * idx = 0 is the default LPM state
	 */

	if (!idx) {
		stop_critical_timings();
		cpu_do_idle();
		start_critical_timings();
		return 1;
	}

	if (from_idle && cpu->levels[idx].use_bc_timer) {
		/*
		 * tick_broadcast_enter() can change the affinity of the
		 * broadcast timer interrupt, during which the interrupt
		 * will be disabled and enabled back. To keep the system
		 * pm ops from doing any interrupt state save or restore
		 * inside this window, hold the lock.
		 */
		spin_lock(&bc_timer_lock);
		ret = tick_broadcast_enter();
		spin_unlock(&bc_timer_lock);
		if (ret)
			return success;
	}

	state_id = get_cluster_id(cpu->parent, &affinity_level, from_idle);
	power_state = PSCI_POWER_STATE(cpu->levels[idx].is_reset);
	affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
	state_id += power_state + affinity_level + cpu->levels[idx].psci_id;

	update_debug_pc_event(CPU_ENTER, state_id,
			0xdeaffeed, 0xdeaffeed, from_idle);
	stop_critical_timings();

	success = !arm_cpuidle_suspend(state_id);

	start_critical_timings();
	update_debug_pc_event(CPU_EXIT, state_id,
			success, 0xdeaffeed, from_idle);

	if (from_idle && cpu->levels[idx].use_bc_timer)
		tick_broadcast_exit();

	return success;
}

static int lpm_cpuidle_select(struct cpuidle_driver *drv,
		struct cpuidle_device *dev)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);

	if (!cpu)
		return 0;

	return cpu_power_select(dev, cpu);
}

static void update_history(struct cpuidle_device *dev, int idx)
{
	struct lpm_history *history = &per_cpu(hist, dev->cpu);
	uint32_t tmr = 0;
	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, dev->cpu);

	if (!lpm_prediction || !lpm_cpu->lpm_prediction)
		return;

	if (history->htmr_wkup) {
		if (!history->hptr)
			history->hptr = MAXSAMPLES-1;
		else
			history->hptr--;

		history->resi[history->hptr] += dev->last_residency;
		history->htmr_wkup = 0;
		tmr = 1;
	} else
		history->resi[history->hptr] = dev->last_residency;

	history->mode[history->hptr] = idx;

	trace_cpu_pred_hist(history->mode[history->hptr],
		history->resi[history->hptr], history->hptr, tmr);

	if (history->nsamp < MAXSAMPLES)
		history->nsamp++;

	(history->hptr)++;
	if (history->hptr >= MAXSAMPLES)
		history->hptr = 0;
}

static int lpm_cpuidle_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int idx)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
	bool success = false;
	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
	ktime_t start = ktime_get();
	uint64_t start_time = ktime_to_ns(start), end_time;

	cpu_prepare(cpu, idx, true);
	cluster_prepare(cpu->parent, cpumask, idx, true, start_time);

	trace_cpu_idle_enter(idx);
	lpm_stats_cpu_enter(idx, start_time);

	if (need_resched())
		goto exit;

	success = psci_enter_sleep(cpu, idx, true);

exit:
	end_time = ktime_to_ns(ktime_get());
	lpm_stats_cpu_exit(idx, end_time, success);

	cluster_unprepare(cpu->parent, cpumask, idx, true, end_time, success);
	cpu_unprepare(cpu, idx, true);
	dev->last_residency = ktime_us_delta(ktime_get(), start);
	update_history(dev, idx);
	trace_cpu_idle_exit(idx, success);
	if (lpm_prediction && cpu->lpm_prediction) {
		histtimer_cancel();
		clusttimer_cancel();
	}
	local_irq_enable();
	return idx;
}

Stephen Boyd5b4972f2017-08-10 16:21:26 -07001440static void lpm_cpuidle_freeze(struct cpuidle_device *dev,
1441 struct cpuidle_driver *drv, int idx)
1442{
1443 struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
1444 const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
Maulik Shah083e22a2018-11-29 14:30:07 +05301445 bool success = false;
Stephen Boyd5b4972f2017-08-10 16:21:26 -07001446
1447 for (; idx >= 0; idx--) {
1448 if (lpm_cpu_mode_allow(dev->cpu, idx, false))
1449 break;
1450 }
1451 if (idx < 0) {
1452 pr_err("Failed suspend\n");
1453 return;
1454 }
1455
1456 cpu_prepare(cpu, idx, true);
1457 cluster_prepare(cpu->parent, cpumask, idx, false, 0);
1458
Maulik Shah083e22a2018-11-29 14:30:07 +05301459 success = psci_enter_sleep(cpu, idx, false);
Stephen Boyd5b4972f2017-08-10 16:21:26 -07001460
Maulik Shah083e22a2018-11-29 14:30:07 +05301461 cluster_unprepare(cpu->parent, cpumask, idx, false, 0, success);
Stephen Boyd5b4972f2017-08-10 16:21:26 -07001462 cpu_unprepare(cpu, idx, true);
1463}
1464
Mahesh Sivasubramanianc2ea76f2016-02-01 10:40:26 -07001465#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
1466static int cpuidle_register_cpu(struct cpuidle_driver *drv,
1467 struct cpumask *mask)
1468{
1469 struct cpuidle_device *device;
1470 int cpu, ret;
1471
1472
1473 if (!mask || !drv)
1474 return -EINVAL;
1475
1476 drv->cpumask = mask;
1477 ret = cpuidle_register_driver(drv);
1478 if (ret) {
1479 pr_err("Failed to register cpuidle driver %d\n", ret);
1480 goto failed_driver_register;
1481 }
1482
1483 for_each_cpu(cpu, mask) {
1484 device = &per_cpu(cpuidle_dev, cpu);
1485 device->cpu = cpu;
1486
1487 ret = cpuidle_register_device(device);
1488 if (ret) {
1489 pr_err("Failed to register cpuidle driver for cpu:%u\n",
1490 cpu);
1491 goto failed_driver_register;
1492 }
1493 }
1494 return ret;
1495failed_driver_register:
1496 for_each_cpu(cpu, mask)
1497 cpuidle_unregister_driver(drv);
1498 return ret;
1499}
1500#else
1501static int cpuidle_register_cpu(struct cpuidle_driver *drv,
1502 struct cpumask *mask)
1503{
1504 return cpuidle_register(drv, NULL);
1505}
1506#endif
1507
static struct cpuidle_governor lpm_governor = {
	.name = "qcom",
	.rating = 30,
	.select = lpm_cpuidle_select,
	.owner = THIS_MODULE,
};

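/**
 * cluster_cpuidle_register - create cpuidle drivers for a cluster hierarchy
 * @cl: cluster to register, recursing into children if it has no cpus
 *
 * For each lpm_cpu in a leaf cluster, allocate a cpuidle driver, build
 * one cpuidle_state per low power level and register the driver with
 * the cpuidle framework.
 */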
static int cluster_cpuidle_register(struct lpm_cluster *cl)
{
	int i = 0, ret = 0;
	unsigned int cpu;
	struct lpm_cluster *p = NULL;
	struct lpm_cpu *lpm_cpu;

	if (list_empty(&cl->cpu)) {
		struct lpm_cluster *n;

		list_for_each_entry(n, &cl->child, list) {
			ret = cluster_cpuidle_register(n);
			if (ret)
				break;
		}
		return ret;
	}

	list_for_each_entry(lpm_cpu, &cl->cpu, list) {
		lpm_cpu->drv = kzalloc(sizeof(*lpm_cpu->drv), GFP_KERNEL);
		if (!lpm_cpu->drv)
			return -ENOMEM;

		lpm_cpu->drv->name = "msm_idle";

		for (i = 0; i < lpm_cpu->nlevels; i++) {
			struct cpuidle_state *st = &lpm_cpu->drv->states[i];
			struct lpm_cpu_level *cpu_level = &lpm_cpu->levels[i];

			snprintf(st->name, CPUIDLE_NAME_LEN, "C%u", i);
			snprintf(st->desc, CPUIDLE_DESC_LEN, "%s",
					cpu_level->name);
			st->flags = 0;
			st->exit_latency = cpu_level->pwr.latency_us;
			st->power_usage = cpu_level->pwr.ss_power;
			st->target_residency = 0;
			st->enter = lpm_cpuidle_enter;
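			/*
			 * Only the deepest level is wired up for
			 * suspend-to-idle (freeze).
			 */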
			if (i == lpm_cpu->nlevels - 1)
				st->enter_freeze = lpm_cpuidle_freeze;
		}

		lpm_cpu->drv->state_count = lpm_cpu->nlevels;
		lpm_cpu->drv->safe_state_index = 0;
		for_each_cpu(cpu, &lpm_cpu->related_cpus)
			per_cpu(cpu_lpm, cpu) = lpm_cpu;

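		/*
		 * Seed votes for cpus that are offline right now, so a
		 * hotplugged-out core does not block cluster low power
		 * modes.
		 */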
		for_each_possible_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			if (per_cpu(cpu_lpm, cpu))
				p = per_cpu(cpu_lpm, cpu)->parent;
			while (p) {
				int j;

				spin_lock(&p->sync_lock);
				cpumask_set_cpu(cpu, &p->num_children_in_sync);
				for (j = 0; j < p->nlevels; j++)
					cpumask_copy(
						&p->levels[j].num_cpu_votes,
						&p->num_children_in_sync);
				spin_unlock(&p->sync_lock);
				p = p->parent;
			}
		}
		ret = cpuidle_register_cpu(lpm_cpu->drv,
					&lpm_cpu->related_cpus);
		if (ret) {
			kfree(lpm_cpu->drv);
			return ret;
		}
	}
	return 0;
}

/**
 * init_lpm - initializes the governor
 */
static int __init init_lpm(void)
{
	return cpuidle_register_governor(&lpm_governor);
}

postcore_initcall(init_lpm);

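/**
 * register_cpu_lpm_stats - register per-cpu level names with lpm-stats
 * @cpu: lpm_cpu whose level names are exported
 * @parent: cluster the cpu hangs off, used as the stats parent
 */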
static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
		struct lpm_cluster *parent)
{
	const char **level_name;
	int i;

	level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);

	if (!level_name)
		return;

	for (i = 0; i < cpu->nlevels; i++)
		level_name[i] = cpu->levels[i].name;

	lpm_stats_config_level("cpu", level_name, cpu->nlevels,
			parent->stats, &cpu->related_cpus);

	kfree(level_name);
}

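/**
 * register_cluster_lpm_stats - recursively register cluster level names
 * @cl: cluster to register with lpm-stats
 * @parent: parent cluster, or NULL at the root
 */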
static void register_cluster_lpm_stats(struct lpm_cluster *cl,
		struct lpm_cluster *parent)
{
	const char **level_name;
	struct lpm_cluster *child;
	struct lpm_cpu *cpu;
	int i;

	if (!cl)
		return;

	level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);

	if (!level_name)
		return;

	for (i = 0; i < cl->nlevels; i++)
		level_name[i] = cl->levels[i].level_name;

	cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
			cl->nlevels, parent ? parent->stats : NULL, NULL);

	kfree(level_name);

	list_for_each_entry(cpu, &cl->cpu, list)
		register_cpu_lpm_stats(cpu, cl);

	if (!list_empty(&cl->cpu))
		return;

	list_for_each_entry(child, &cl->child, list)
		register_cluster_lpm_stats(child, cl);
}

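/*
 * Suspend bracketing: flag suspend in progress for the idle path and
 * keep the lpm-stats suspend accounting in sync.
 */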
static int lpm_suspend_prepare(void)
{
	suspend_in_progress = true;
	lpm_stats_suspend_enter();

	return 0;
}

static void lpm_suspend_wake(void)
{
	suspend_in_progress = false;
	lpm_stats_suspend_exit();
}

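/**
 * lpm_suspend_enter - enter the deepest allowed level for system suspend
 * @state: target suspend state (only mem is valid here)
 *
 * Called on the last running cpu once the others are hotplugged off;
 * selects the deepest cpu level allowed for suspend and enters it,
 * along with the corresponding cluster levels, via PSCI.
 */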
static int lpm_suspend_enter(suspend_state_t state)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);
	struct lpm_cluster *cluster = lpm_cpu->parent;
	const struct cpumask *cpumask = get_cpu_mask(cpu);
	int idx;
	bool success;

	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
		if (lpm_cpu_mode_allow(cpu, idx, false))
			break;
	}
	if (idx < 0) {
		pr_err("Failed suspend\n");
		return 0;
	}
	cpu_prepare(lpm_cpu, idx, false);
	cluster_prepare(cluster, cpumask, idx, false, 0);

	success = psci_enter_sleep(lpm_cpu, idx, false);

	cluster_unprepare(cluster, cpumask, idx, false, 0, success);
	cpu_unprepare(lpm_cpu, idx, false);
	return 0;
}

static const struct platform_suspend_ops lpm_suspend_ops = {
	.enter = lpm_suspend_enter,
	.valid = suspend_valid_only_mem,
	.prepare_late = lpm_suspend_prepare,
	.wake = lpm_suspend_wake,
};

static const struct platform_freeze_ops lpm_freeze_ops = {
	.prepare = lpm_suspend_prepare,
	.restore = lpm_suspend_wake,
};

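/**
 * lpm_probe - parse low power levels from DT and hook into the kernel
 * @pdev: the lpm-levels platform device
 *
 * Builds the cluster/cpu low power level hierarchy from device tree,
 * then registers suspend and freeze ops, per-cpu history timers,
 * cpuidle drivers, hotplug callbacks, sysfs nodes and the lpm_debug
 * minidump region.
 */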
static int lpm_probe(struct platform_device *pdev)
{
	int ret;
	int size;
	unsigned int cpu;
	struct hrtimer *cpu_histtimer;
	struct kobject *module_kobj = NULL;
	struct md_region md_entry;

	get_online_cpus();
	lpm_root_node = lpm_of_parse_cluster(pdev);

	if (IS_ERR_OR_NULL(lpm_root_node)) {
		pr_err("Failed to probe low power modes\n");
		put_online_cpus();
		return PTR_ERR(lpm_root_node);
	}

	if (print_parsed_dt)
		cluster_dt_walkthrough(lpm_root_node);

	/*
	 * Register the hotplug callbacks before the broadcast timer is
	 * configured, to prevent a race where a broadcast timer might not
	 * be set up for a core. This is a bug in the existing code, but
	 * there are no known issues, possibly because of how late
	 * lpm_levels gets initialized.
	 */
	suspend_set_ops(&lpm_suspend_ops);
	freeze_set_ops(&lpm_freeze_ops);
	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	for_each_possible_cpu(cpu) {
		cpu_histtimer = &per_cpu(histtimer, cpu);
		hrtimer_init(cpu_histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	}

	cluster_timer_init(lpm_root_node);

	size = num_dbg_elements * sizeof(struct lpm_debug);
	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
			&lpm_debug_phys, GFP_KERNEL);

	register_cluster_lpm_stats(lpm_root_node, NULL);

	ret = cluster_cpuidle_register(lpm_root_node);
	put_online_cpus();
	if (ret) {
		pr_err("Failed to register with cpuidle framework\n");
		goto failed;
	}

	ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
			"AP_QCOM_SLEEP_STARTING",
			lpm_starting_cpu, lpm_dying_cpu);
	if (ret)
		goto failed;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
		ret = -ENOENT;
		goto failed;
	}

	ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
	if (ret) {
		pr_err("Failed to create cluster level nodes\n");
		goto failed;
	}

	/* Add lpm_debug to Minidump */
	strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
	md_entry.virt_addr = (uintptr_t)lpm_debug;
	md_entry.phys_addr = lpm_debug_phys;
	md_entry.size = size;
	if (msm_minidump_add_region(&md_entry) < 0)
		pr_info("Failed to add lpm_debug in Minidump\n");

	return 0;
failed:
	free_cluster_node(lpm_root_node);
	lpm_root_node = NULL;
	return ret;
}

static const struct of_device_id lpm_mtch_tbl[] = {
	{.compatible = "qcom,lpm-levels"},
	{},
};

static struct platform_driver lpm_driver = {
	.probe = lpm_probe,
	.driver = {
		.name = "lpm-levels",
		.owner = THIS_MODULE,
		.suppress_bind_attrs = true,
		.of_match_table = lpm_mtch_tbl,
	},
};

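/**
 * lpm_levels_module_init - initialize the ARM cpuidle backend (if needed)
 * and register the lpm-levels platform driver
 */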
static int __init lpm_levels_module_init(void)
{
	int rc;

#ifdef CONFIG_ARM
	int cpu;

	for_each_possible_cpu(cpu) {
		rc = arm_cpuidle_init(cpu);
		if (rc) {
			pr_err("CPU%d ARM CPUidle init failed (%d)\n", cpu, rc);
			return rc;
		}
	}
#endif

	rc = platform_driver_register(&lpm_driver);
	if (rc)
		pr_err("Error registering %s rc=%d\n", lpm_driver.driver.name,
				rc);

	return rc;
}
late_initcall(lpm_levels_module_init);