/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/tick.h>
#include <linux/suspend.h>
#include <linux/pm_qos.h>
#include <linux/of_platform.h>
#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/cpu_pm.h>
#include <linux/cpuhotplug.h>
#include <soc/qcom/pm.h>
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/system_pm.h>
#include <asm/arch_timer.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
#include "lpm-levels.h"
#include <trace/events/power.h>
#include "../clk/clk.h"
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_low_power.h>

#define SCLK_HZ (32768)
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
#define BIAS_HYST (bias_hyst * NSEC_PER_MSEC)

enum {
	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};

enum debug_event {
	CPU_ENTER,
	CPU_EXIT,
	CLUSTER_ENTER,
	CLUSTER_EXIT,
	CPU_HP_STARTING,
	CPU_HP_DYING,
};

struct lpm_debug {
	cycle_t time;
	enum debug_event evt;
	int cpu;
	uint32_t arg1;
	uint32_t arg2;
	uint32_t arg3;
	uint32_t arg4;
};

struct lpm_cluster *lpm_root_node;

#define MAXSAMPLES 5

static bool lpm_prediction = true;
module_param_named(lpm_prediction, lpm_prediction, bool, 0664);

static uint32_t ref_stddev = 100;
module_param_named(ref_stddev, ref_stddev, uint, 0664);

static uint32_t tmr_add = 100;
module_param_named(tmr_add, tmr_add, uint, 0664);

static uint32_t bias_hyst;
module_param_named(bias_hyst, bias_hyst, uint, 0664);

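/*
 * Per-CPU idle history used by the prediction logic below: a ring buffer
 * of the last MAXSAMPLES residencies and the mode chosen for each.
 * hptr is the ring pointer, htmr_wkup marks samples whose wakeup came
 * from the history timer rather than a real event, and stime is the
 * absolute time by which the current prediction expects a wakeup.
 */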
struct lpm_history {
	uint32_t resi[MAXSAMPLES];
	int mode[MAXSAMPLES];
	int nsamp;
	uint32_t hptr;
	uint32_t hinvalid;
	uint32_t htmr_wkup;
	int64_t stime;
};

static DEFINE_PER_CPU(struct lpm_history, hist);

static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static struct hrtimer histtimer;
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;

static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time);
static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time);

static bool menu_select;
module_param_named(menu_select, menu_select, bool, 0664);

static int msm_pm_sleep_time_override;
module_param_named(sleep_time_override,
	msm_pm_sleep_time_override, int, 0664);
static uint64_t suspend_wake_time;

static bool print_parsed_dt;
module_param_named(print_parsed_dt, print_parsed_dt, bool, 0664);

static bool sleep_disabled;
module_param_named(sleep_disabled, sleep_disabled, bool, 0664);

/**
 * msm_cpuidle_get_deep_idle_latency - Get deep idle latency value
 *
 * Returns an s32 latency value
 */
s32 msm_cpuidle_get_deep_idle_latency(void)
{
	return 10;
}
EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);

void lpm_suspend_wake_time(uint64_t wakeup_time)
{
	if (wakeup_time <= 0) {
		suspend_wake_time = msm_pm_sleep_time_override;
		return;
	}

	if (msm_pm_sleep_time_override &&
			(msm_pm_sleep_time_override < wakeup_time))
		suspend_wake_time = msm_pm_sleep_time_override;
	else
		suspend_wake_time = wakeup_time;
}
EXPORT_SYMBOL(lpm_suspend_wake_time);

static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
		struct latency_level *lat_level)
{
	struct list_head *list;
	struct lpm_cluster_level *level;
	struct lpm_cluster *n;
	struct power_params *pwr_params;
	uint32_t latency = 0;
	int i;

	if (!cluster->list.next) {
		for (i = 0; i < cluster->nlevels; i++) {
			level = &cluster->levels[i];
			pwr_params = &level->pwr;
			if (lat_level->reset_level == level->reset_level) {
				if ((latency > pwr_params->latency_us)
						|| (!latency))
					latency = pwr_params->latency_us;
				break;
			}
		}
	} else {
		list_for_each(list, &cluster->parent->child) {
			n = list_entry(list, typeof(*n), list);
			if (lat_level->level_name) {
				if (strcmp(lat_level->level_name,
						n->cluster_name))
					continue;
			}
			for (i = 0; i < n->nlevels; i++) {
				level = &n->levels[i];
				pwr_params = &level->pwr;
				if (lat_level->reset_level ==
						level->reset_level) {
					if ((latency > pwr_params->latency_us)
							|| (!latency))
						latency =
						pwr_params->latency_us;
					break;
				}
			}
		}
	}
	return latency;
}

static uint32_t least_cpu_latency(struct list_head *child,
				struct latency_level *lat_level)
{
	struct list_head *list;
	struct lpm_cpu_level *level;
	struct power_params *pwr_params;
	struct lpm_cpu *cpu;
	struct lpm_cluster *n;
	uint32_t lat = 0;
	int i;

	list_for_each(list, child) {
		n = list_entry(list, typeof(*n), list);
		if (lat_level->level_name) {
			if (strcmp(lat_level->level_name, n->cluster_name))
				continue;
		}
		list_for_each_entry(cpu, &n->cpu, list) {
			for (i = 0; i < cpu->nlevels; i++) {
				level = &cpu->levels[i];
				pwr_params = &level->pwr;
				if (lat_level->reset_level
						== level->reset_level) {
					if ((lat > pwr_params->latency_us)
							|| (!lat))
						lat = pwr_params->latency_us;
					break;
				}
			}
		}
	}
	return lat;
}

static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
		int affinity_level)
{
	struct lpm_cluster *n;

	if ((cluster->aff_level == affinity_level)
		|| ((!list_empty(&cluster->cpu)) && (affinity_level == 0)))
		return cluster;
	else if (list_empty(&cluster->cpu)) {
		n = list_entry(cluster->child.next, typeof(*n), list);
		return cluster_aff_match(n, affinity_level);
	} else
		return NULL;
}

int lpm_get_latency(struct latency_level *level, uint32_t *latency)
{
	struct lpm_cluster *cluster;
	uint32_t val;

	if (!lpm_root_node) {
		pr_err("lpm_probe not completed\n");
		return -EAGAIN;
	}

	if ((level->affinity_level < 0)
			|| (level->affinity_level > lpm_root_node->aff_level)
			|| (level->reset_level < LPM_RESET_LVL_RET)
			|| (level->reset_level > LPM_RESET_LVL_PC)
			|| !latency)
		return -EINVAL;

	cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
	if (!cluster) {
		pr_err("No matching cluster found for affinity_level:%d\n",
				level->affinity_level);
		return -EINVAL;
	}

	if (level->affinity_level == 0)
		val = least_cpu_latency(&cluster->parent->child, level);
	else
		val = least_cluster_latency(cluster, level);

	if (!val) {
		pr_err("No mode with affinity_level:%d reset_level:%d\n",
				level->affinity_level, level->reset_level);
		return -EINVAL;
	}

	*latency = val;

	return 0;
}
EXPORT_SYMBOL(lpm_get_latency);

static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
		uint32_t arg2, uint32_t arg3, uint32_t arg4)
{
	struct lpm_debug *dbg;
	int idx;
	static DEFINE_SPINLOCK(debug_lock);
	static int pc_event_index;

	if (!lpm_debug)
		return;

	spin_lock(&debug_lock);
	idx = pc_event_index++;
	dbg = &lpm_debug[idx & (num_dbg_elements - 1)];

	dbg->evt = event;
	dbg->time = arch_counter_get_cntvct();
	dbg->cpu = raw_smp_processor_id();
	dbg->arg1 = arg1;
	dbg->arg2 = arg2;
	dbg->arg3 = arg3;
	dbg->arg4 = arg4;
	spin_unlock(&debug_lock);
}

static int lpm_dying_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	update_debug_pc_event(CPU_HP_DYING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}

static int lpm_starting_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	update_debug_pc_event(CPU_HP_STARTING, cpu,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], false);
	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	return 0;
}

static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
{
	return HRTIMER_NORESTART;
}

static void histtimer_cancel(void)
{
	hrtimer_try_to_cancel(&histtimer);
}

static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
{
	int cpu = raw_smp_processor_id();
	struct lpm_history *history = &per_cpu(hist, cpu);

	history->hinvalid = 1;
	return HRTIMER_NORESTART;
}

static void histtimer_start(uint32_t time_us)
{
	uint64_t time_ns = time_us * NSEC_PER_USEC;
	ktime_t hist_ktime = ns_to_ktime(time_ns);

	histtimer.function = histtimer_fn;
	hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
}

static void cluster_timer_init(struct lpm_cluster *cluster)
{
	struct list_head *list;

	if (!cluster)
		return;

	hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		n = list_entry(list, typeof(*n), list);
		cluster_timer_init(n);
	}
}

static void clusttimer_cancel(void)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;

	hrtimer_try_to_cancel(&cluster->histtimer);

	if (cluster->parent)
		hrtimer_try_to_cancel(&cluster->parent->histtimer);
}

static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
{
	struct lpm_cluster *cluster = container_of(h,
				struct lpm_cluster, histtimer);

	cluster->history.hinvalid = 1;
	return HRTIMER_NORESTART;
}

static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
{
	uint64_t time_ns = time_us * NSEC_PER_USEC;
	ktime_t clust_ktime = ns_to_ktime(time_ns);

	cluster->histtimer.function = clusttimer_fn;
	hrtimer_start(&cluster->histtimer, clust_ktime,
				HRTIMER_MODE_REL_PINNED);
}

static void msm_pm_set_timer(uint32_t modified_time_us)
{
	u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
	ktime_t modified_ktime = ns_to_ktime(modified_time_ns);

	lpm_hrtimer.function = lpm_hrtimer_cb;
	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
}

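/*
 * lpm_cpuidle_predict - estimate the next sleep length from recent history.
 * Returns a predicted residency in microseconds when the last MAXSAMPLES
 * residencies cluster tightly around their average, or 0 when no prediction
 * can be made. When a specific mode instead shows mostly premature exits,
 * *idx_restrict and *idx_restrict_time are set so the caller avoids that
 * mode and anything deeper.
 */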
static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
		struct lpm_cpu *cpu, int *idx_restrict,
		uint32_t *idx_restrict_time)
{
	int i, j, divisor;
	uint64_t max, avg, stddev;
	int64_t thresh = LLONG_MAX;
	struct lpm_history *history = &per_cpu(hist, dev->cpu);
	uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);

	if (!lpm_prediction)
		return 0;

	/*
	 * Samples are marked invalid when the wakeup was due to the
	 * history timer, so do not predict.
	 */
	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->stime = 0;
		return 0;
	}

	/*
	 * Predict only when all the samples are collected.
	 */
	if (history->nsamp < MAXSAMPLES) {
		history->stime = 0;
		return 0;
	}

	/*
	 * Check whether the samples deviate much; if not, use their
	 * average as the predicted sleep time. Otherwise, if any specific
	 * mode has more premature exits, return the index of that mode.
	 */

again:
	max = avg = divisor = stddev = 0;
	for (i = 0; i < MAXSAMPLES; i++) {
		int64_t value = history->resi[i];

		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

	for (i = 0; i < MAXSAMPLES; i++) {
		int64_t value = history->resi[i];

		if (value <= thresh) {
			int64_t diff = value - avg;

			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);
	stddev = int_sqrt(stddev);
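
	/*
	 * Worked example (hypothetical numbers): residencies of
	 * {100, 102, 98, 101, 99} us give avg = 100, variance = 10/5 = 2,
	 * stddev = int_sqrt(2) = 1 <= ref_stddev, so 100 us is predicted.
	 */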
	/*
	 * If the deviation is small, return the average; else
	 * ignore one maximum sample and retry.
	 */
	if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
					|| stddev <= ref_stddev) {
		history->stime = ktime_to_us(ktime_get()) + avg;
		return avg;
	} else if (divisor > (MAXSAMPLES - 1)) {
		thresh = max - 1;
		goto again;
	}

	/*
	 * Find the number of premature exits for each of the modes,
	 * excluding the clock gating mode. If more than fifty percent
	 * of the samples for a mode are premature, restrict that mode
	 * and all deeper modes.
	 */
	if (history->htmr_wkup != 1) {
		for (j = 1; j < cpu->nlevels; j++) {
			uint32_t failed = 0;
			uint64_t total = 0;

			for (i = 0; i < MAXSAMPLES; i++) {
				if ((history->mode[i] == j) &&
					(history->resi[i] < min_residency[j])) {
					failed++;
					total += history->resi[i];
				}
			}
			if (failed > (MAXSAMPLES/2)) {
				*idx_restrict = j;
				do_div(total, failed);
				*idx_restrict_time = total;
				history->stime = ktime_to_us(ktime_get())
						+ *idx_restrict_time;
				break;
			}
		}
	}
	return 0;
}

static inline void invalidate_predict_history(struct cpuidle_device *dev)
{
	struct lpm_history *history = &per_cpu(hist, dev->cpu);

	if (!lpm_prediction)
		return;

	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->stime = 0;
	}
}

static void clear_predict_history(void)
{
	struct lpm_history *history;
	int i;
	unsigned int cpu;

	if (!lpm_prediction)
		return;

	for_each_possible_cpu(cpu) {
		history = &per_cpu(hist, cpu);
		for (i = 0; i < MAXSAMPLES; i++) {
			history->resi[i] = 0;
			history->mode[i] = -1;
			history->hptr = 0;
			history->nsamp = 0;
			history->stime = 0;
		}
	}
}

static void update_history(struct cpuidle_device *dev, int idx);

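/*
 * A CPU that was busy within the last bias_hyst milliseconds is treated
 * as biased toward shallow idle: deeper mode selection is skipped for it
 * in cpu_power_select() below.
 */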
static inline bool is_cpu_biased(int cpu)
{
	u64 now = sched_clock();
	u64 last = sched_get_cpu_last_busy_time(cpu);

	if (!last)
		return false;

	return (now - last) < BIAS_HYST;
}

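/*
 * cpu_power_select - pick a CPU idle level. Levels are scanned from
 * shallow to deep; the scan stops once a level's entry latency exceeds
 * the PM QoS bound or the time to the next event, and settles on the
 * deepest level whose residency fits the expected (or predicted) sleep.
 */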
static int cpu_power_select(struct cpuidle_device *dev,
		struct lpm_cpu *cpu)
{
	int best_level = 0;
	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
							dev->cpu);
	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
	uint32_t modified_time_us = 0;
	uint32_t next_event_us = 0;
	int i, idx_restrict;
	uint32_t lvl_latency_us = 0;
	uint64_t predicted = 0;
	uint32_t htime = 0, idx_restrict_time = 0;
	uint32_t next_wakeup_us = (uint32_t)sleep_us;
	uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
	uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);

	if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
		return best_level;

	idx_restrict = cpu->nlevels + 1;

	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));

	if (is_cpu_biased(dev->cpu))
		goto done_select;

	for (i = 0; i < cpu->nlevels; i++) {
		struct lpm_cpu_level *level = &cpu->levels[i];
		struct power_params *pwr_params = &level->pwr;
		bool allow;

		allow = lpm_cpu_mode_allow(dev->cpu, i, true);

		if (!allow)
			continue;

		lvl_latency_us = pwr_params->latency_us;

		if (latency_us < lvl_latency_us)
			break;

		if (next_event_us) {
			if (next_event_us < lvl_latency_us)
				break;

			if (((next_event_us - lvl_latency_us) < sleep_us) ||
					(next_event_us < sleep_us))
				next_wakeup_us = next_event_us - lvl_latency_us;
		}

		if (!i) {
			/*
			 * If next_wakeup_us itself is not sufficient for
			 * low power modes deeper than clock gating, do not
			 * call prediction.
			 */
			if (next_wakeup_us > max_residency[i]) {
				predicted = lpm_cpuidle_predict(dev, cpu,
					&idx_restrict, &idx_restrict_time);
				if (predicted && (predicted < min_residency[i]))
					predicted = min_residency[i];
			} else
				invalidate_predict_history(dev);
		}

		if (i >= idx_restrict)
			break;

		best_level = i;

		if (next_event_us && next_event_us < sleep_us && !i)
			modified_time_us = next_event_us - lvl_latency_us;
		else
			modified_time_us = 0;

		if (predicted ? (predicted <= max_residency[i])
			: (next_wakeup_us <= max_residency[i]))
			break;
	}

	if (modified_time_us)
		msm_pm_set_timer(modified_time_us);

	/*
	 * Start a timer to avoid staying in a shallower mode forever
	 * in case of misprediction.
	 */
	if ((predicted || (idx_restrict != (cpu->nlevels + 1)))
			&& ((best_level >= 0)
			&& (best_level < (cpu->nlevels-1)))) {
		htime = predicted + tmr_add;
		if (htime == tmr_add)
			htime = idx_restrict_time;
		else if (htime > max_residency[best_level])
			htime = max_residency[best_level];

		if ((next_wakeup_us > htime) &&
			((next_wakeup_us - htime) > max_residency[best_level]))
			histtimer_start(htime);
	}

done_select:
	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);

	trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
			predicted, htime);

	return best_level;
}

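/*
 * get_cluster_sleep_time - time in usec until the earliest next event on
 * any online CPU in the cluster (or the override wake time when called
 * from suspend). Also reports the soonest per-CPU predicted wakeup via
 * *pred_time and, through *mask, the CPU expected to wake first.
 */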
static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
		struct cpumask *mask, bool from_idle, uint32_t *pred_time)
{
	int cpu;
	int next_cpu = raw_smp_processor_id();
	ktime_t next_event;
	struct cpumask online_cpus_in_cluster;
	struct lpm_history *history;
	int64_t prediction = LONG_MAX;

	next_event.tv64 = KTIME_MAX;
	if (!suspend_wake_time)
		suspend_wake_time = msm_pm_sleep_time_override;
	if (!from_idle) {
		if (mask)
			cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
		if (!suspend_wake_time)
			return ~0ULL;
		else
			return USEC_PER_SEC * suspend_wake_time;
	}

	cpumask_and(&online_cpus_in_cluster,
			&cluster->num_children_in_sync, cpu_online_mask);

	for_each_cpu(cpu, &online_cpus_in_cluster) {
		ktime_t *next_event_c;

		next_event_c = get_next_event_cpu(cpu);
		if (next_event_c->tv64 < next_event.tv64) {
			next_event.tv64 = next_event_c->tv64;
			next_cpu = cpu;
		}

		if (from_idle && lpm_prediction) {
			history = &per_cpu(hist, cpu);
			if (history->stime && (history->stime < prediction))
				prediction = history->stime;
		}
	}

	if (mask)
		cpumask_copy(mask, cpumask_of(next_cpu));

	if (from_idle && lpm_prediction) {
		if (prediction > ktime_to_us(ktime_get()))
			*pred_time = prediction - ktime_to_us(ktime_get());
	}

	if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
		return ktime_to_us(ktime_sub(next_event, ktime_get()));
	else
		return 0;
}

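/*
 * cluster_predict - returns 0 when no cluster-level prediction is made,
 * 1 when *pred_us comes from a mode with mostly premature exits, and
 * 2 when *pred_us is the average of the last MAXSAMPLES residencies.
 */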
static int cluster_predict(struct lpm_cluster *cluster,
				uint32_t *pred_us)
{
	int i, j;
	int ret = 0;
	struct cluster_history *history = &cluster->history;
	int64_t cur_time = ktime_to_us(ktime_get());

	if (!lpm_prediction)
		return 0;

	if (history->hinvalid) {
		history->hinvalid = 0;
		history->htmr_wkup = 1;
		history->flag = 0;
		return ret;
	}

	if (history->nsamp == MAXSAMPLES) {
		for (i = 0; i < MAXSAMPLES; i++) {
			if ((cur_time - history->stime[i])
					> CLUST_SMPL_INVLD_TIME)
				history->nsamp--;
		}
	}

	if (history->nsamp < MAXSAMPLES) {
		history->flag = 0;
		return ret;
	}

	if (history->flag == 2)
		history->flag = 0;

	if (history->htmr_wkup != 1) {
		uint64_t total = 0;

		if (history->flag == 1) {
			for (i = 0; i < MAXSAMPLES; i++)
				total += history->resi[i];
			do_div(total, MAXSAMPLES);
			*pred_us = total;
			return 2;
		}

		for (j = 1; j < cluster->nlevels; j++) {
			uint32_t failed = 0;

			total = 0;
			for (i = 0; i < MAXSAMPLES; i++) {
				if ((history->mode[i] == j) && (history->resi[i]
				< cluster->levels[j].pwr.min_residency)) {
					failed++;
					total += history->resi[i];
				}
			}

			if (failed > (MAXSAMPLES-2)) {
				do_div(total, failed);
				*pred_us = total;
				history->flag = 1;
				return 1;
			}
		}
	}

	return ret;
}

static void update_cluster_history_time(struct cluster_history *history,
						int idx, uint64_t start)
{
	history->entry_idx = idx;
	history->entry_time = start;
}

static void update_cluster_history(struct cluster_history *history, int idx)
{
	uint32_t tmr = 0;
	uint32_t residency = 0;
	struct lpm_cluster *cluster =
			container_of(history, struct lpm_cluster, history);

	if (!lpm_prediction)
		return;

	if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
		residency = ktime_to_us(ktime_get()) - history->entry_time;
		history->stime[history->hptr] = history->entry_time;
	} else
		return;

	if (history->htmr_wkup) {
		if (!history->hptr)
			history->hptr = MAXSAMPLES-1;
		else
			history->hptr--;

		history->resi[history->hptr] += residency;

		history->htmr_wkup = 0;
		tmr = 1;
	} else
		history->resi[history->hptr] = residency;

	history->mode[history->hptr] = idx;

	history->entry_idx = INT_MIN;
	history->entry_time = 0;

	if (history->nsamp < MAXSAMPLES)
		history->nsamp++;

	trace_cluster_pred_hist(cluster->cluster_name,
		history->mode[history->hptr], history->resi[history->hptr],
		history->hptr, tmr);

	(history->hptr)++;

	if (history->hptr >= MAXSAMPLES)
		history->hptr = 0;
}

static void clear_cl_history_each(struct cluster_history *history)
{
	int i;

	for (i = 0; i < MAXSAMPLES; i++) {
		history->resi[i] = 0;
		history->mode[i] = -1;
		history->stime[i] = 0;
	}

	history->hptr = 0;
	history->nsamp = 0;
	history->flag = 0;
	history->hinvalid = 0;
	history->htmr_wkup = 0;
}

static void clear_cl_predict_history(void)
{
	struct lpm_cluster *cluster = lpm_root_node;
	struct list_head *list;

	if (!lpm_prediction)
		return;

	clear_cl_history_each(&cluster->history);

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		n = list_entry(list, typeof(*n), list);
		clear_cl_history_each(&n->history);
	}
}

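/*
 * cluster_select - pick a cluster low power level once the CPUs in the
 * cluster are in sync. A level is usable only when all child CPUs' votes
 * support it, the aggregated PM QoS latency allows it, and the expected
 * (or predicted) sleep length covers its overhead and residency.
 */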
static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
							int *ispred)
{
	int best_level = -1;
	int i;
	struct cpumask mask;
	uint32_t latency_us = ~0U;
	uint32_t sleep_us;
	uint32_t cpupred_us = 0, pred_us = 0;
	int pred_mode = 0, predicted = 0;

	if (!cluster)
		return -EINVAL;

	sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL,
						from_idle, &cpupred_us);

	if (from_idle) {
		pred_mode = cluster_predict(cluster, &pred_us);

		if (cpupred_us && pred_mode && (cpupred_us < pred_us))
			pred_us = cpupred_us;

		if (pred_us && pred_mode && (pred_us < sleep_us))
			predicted = 1;

		if (predicted && (pred_us == cpupred_us))
			predicted = 2;
	}

	if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
		latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
							&mask);

	/*
	 * If at least one of the cores in the cluster is online, the cluster
	 * low power modes should be determined by the idle characteristics
	 * even if the last core enters the low power mode as a part of
	 * hotplug.
	 */

	if (!from_idle && num_online_cpus() > 1 &&
		cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
		from_idle = true;

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *level = &cluster->levels[i];
		struct power_params *pwr_params = &level->pwr;

		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
			continue;

		if (!cpumask_equal(&cluster->num_children_in_sync,
					&level->num_cpu_votes))
			continue;

		if (from_idle && latency_us < pwr_params->latency_us)
			break;

		if (sleep_us < pwr_params->time_overhead_us)
			break;

		if (suspend_in_progress && from_idle && level->notify_rpm)
			continue;

		if (level->is_reset && !system_sleep_allowed())
			continue;

		best_level = i;

		if (from_idle &&
			(predicted ? (pred_us <= pwr_params->max_residency)
			: (sleep_us <= pwr_params->max_residency)))
			break;
	}

	if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
		cluster->history.flag = 2;

	*ispred = predicted;

	trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
					latency_us, predicted, pred_us);

	return best_level;
}

static void cluster_notify(struct lpm_cluster *cluster,
		struct lpm_cluster_level *level, bool enter)
{
	if (level->is_reset && enter)
		cpu_cluster_pm_enter(cluster->aff_level);
	else if (level->is_reset && !enter)
		cpu_cluster_pm_exit(cluster->aff_level);
}

static int cluster_configure(struct lpm_cluster *cluster, int idx,
		bool from_idle, int predicted)
{
	struct lpm_cluster_level *level = &cluster->levels[idx];

	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
			|| is_IPI_pending(&cluster->num_children_in_sync)) {
		return -EPERM;
	}

	if (idx != cluster->default_level) {
		update_debug_pc_event(CLUSTER_ENTER, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		trace_cluster_enter(cluster->cluster_name, idx,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
		lpm_stats_cluster_enter(cluster->stats, idx);

		if (from_idle && lpm_prediction)
			update_cluster_history_time(&cluster->history, idx,
						ktime_to_us(ktime_get()));
	}

	if (level->notify_rpm) {
		uint64_t us;
		uint32_t pred_us;

		us = get_cluster_sleep_time(cluster, NULL, from_idle,
							&pred_us);

		us = us + 1;

		clear_predict_history();
		clear_cl_predict_history();

		if (system_sleep_enter(us))
			return -EBUSY;
	}
	/* Notify cluster enter event after successful config completion */
	cluster_notify(cluster, level, true);

	cluster->last_level = idx;

	if (predicted && (idx < (cluster->nlevels - 1))) {
		struct power_params *pwr_params = &cluster->levels[idx].pwr;

		clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
	}

	return 0;
}

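/*
 * cluster_prepare/cluster_unprepare walk the cluster hierarchy bottom-up.
 * The last CPU of a cluster to idle (when num_children_in_sync equals
 * child_cpus) selects and configures the cluster level; the first CPU to
 * wake tears it down and propagates the exit to the parent cluster.
 */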
static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t start_time)
{
	int i;
	int predicted = 0;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	cpumask_or(&cluster->num_children_in_sync, cpu,
			&cluster->num_children_in_sync);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_or(&lvl->num_cpu_votes, cpu,
					&lvl->num_cpu_votes);
	}

	/*
	 * cluster_select() does not make any configuration changes, so it is
	 * OK to release the lock here. If a core wakes up for a rude request,
	 * it need not wait for another to finish its cluster selection and
	 * configuration process.
	 */

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto failed;

	i = cluster_select(cluster, from_idle, &predicted);

	if (((i < 0) || (i == cluster->default_level))
			&& predicted && from_idle) {
		update_cluster_history_time(&cluster->history,
					-1, ktime_to_us(ktime_get()));

		if (i < 0) {
			struct power_params *pwr_params =
						&cluster->levels[0].pwr;

			clusttimer_start(cluster,
					pwr_params->max_residency + tmr_add);

			goto failed;
		}
	}

	if (i < 0)
		goto failed;

	if (cluster_configure(cluster, i, from_idle, predicted))
		goto failed;

	cluster->stats->sleep_time = start_time;
	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
			from_idle, start_time);

	spin_unlock(&cluster->sync_lock);
	return;
failed:
	spin_unlock(&cluster->sync_lock);
	cluster->stats->sleep_time = 0;
}

static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t end_time)
{
	struct lpm_cluster_level *level;
	bool first_cpu;
	int last_level, i;

	if (!cluster)
		return;

	if (cluster->min_child_level > child_idx)
		return;

	spin_lock(&cluster->sync_lock);
	last_level = cluster->default_level;
	first_cpu = cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus);
	cpumask_andnot(&cluster->num_children_in_sync,
			&cluster->num_children_in_sync, cpu);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *lvl = &cluster->levels[i];

		if (child_idx >= lvl->min_child_level)
			cpumask_andnot(&lvl->num_cpu_votes,
					&lvl->num_cpu_votes, cpu);
	}

	if (from_idle && first_cpu &&
		(cluster->last_level == cluster->default_level))
		update_cluster_history(&cluster->history, cluster->last_level);

	if (!first_cpu || cluster->last_level == cluster->default_level)
		goto unlock_return;

	if (cluster->stats->sleep_time)
		cluster->stats->sleep_time = end_time -
			cluster->stats->sleep_time;
	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);

	level = &cluster->levels[cluster->last_level];

	if (level->notify_rpm)
		system_sleep_exit();

	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);
	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
			cluster->child_cpus.bits[0], from_idle);

	last_level = cluster->last_level;
	cluster->last_level = cluster->default_level;

	cluster_notify(cluster, &cluster->levels[last_level], false);

	if (from_idle)
		update_cluster_history(&cluster->history, last_level);

	cluster_unprepare(cluster->parent, &cluster->child_cpus,
			last_level, from_idle, end_time);
unlock_return:
	spin_unlock(&cluster->sync_lock);
}

static inline void cpu_prepare(struct lpm_cpu *cpu, int cpu_index,
				bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];

	/* Use the broadcast timer for aggregating sleep mode within a
	 * cluster. A broadcast timer could be used in the following
	 * scenarios:
	 * 1) The architected timer HW gets reset during certain low power
	 * modes and the core relies on an external (broadcast) timer to
	 * wake up from sleep. This information is passed through device
	 * tree.
	 * 2) The CPU low power mode could trigger a system low power mode.
	 * The low power module relies on the broadcast timer to aggregate
	 * the next wakeup within a cluster, in which case the CPU switches
	 * over to using the broadcast timer.
	 */

	if (from_idle && cpu_level->is_reset)
		cpu_pm_enter();
}

static inline void cpu_unprepare(struct lpm_cpu *cpu, int cpu_index,
				bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];

	if (from_idle && cpu_level->is_reset)
		cpu_pm_exit();
}

int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
{
	int state_id = 0;

	if (!cluster)
		return 0;

	spin_lock(&cluster->sync_lock);

	if (!cpumask_equal(&cluster->num_children_in_sync,
				&cluster->child_cpus))
		goto unlock_and_return;

	state_id |= get_cluster_id(cluster->parent, aff_lvl);

	if (cluster->last_level != cluster->default_level) {
		struct lpm_cluster_level *level
			= &cluster->levels[cluster->last_level];

		state_id |= (level->psci_id & cluster->psci_mode_mask)
					<< cluster->psci_mode_shift;
		(*aff_lvl)++;
	}
unlock_and_return:
	spin_unlock(&cluster->sync_lock);
	return state_id;
}

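/*
 * psci_enter_sleep composes the PSCI state_id from the macros at the top
 * of this file. Illustrative (hypothetical) values: is_reset = 1,
 * affinity_level = 2 and a CPU level psci_id of 0x4 give
 * (1 << 30) | (2 << 24) | 0x4 = 0x42000004.
 */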
static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
{
	int affinity_level = 0, state_id = 0, power_state = 0;
	bool success = false;
	/*
	 * idx = 0 is the default LPM state
	 */

	if (!idx) {
		stop_critical_timings();
		wfi();
		start_critical_timings();
		return 1;
	}

	if (from_idle && cpu->levels[idx].use_bc_timer) {
		if (tick_broadcast_enter())
			return success;
	}

	state_id = get_cluster_id(cpu->parent, &affinity_level);
	power_state = PSCI_POWER_STATE(cpu->levels[idx].is_reset);
	affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
	state_id |= power_state | affinity_level | cpu->levels[idx].psci_id;

	update_debug_pc_event(CPU_ENTER, state_id,
			0xdeaffeed, 0xdeaffeed, from_idle);
	stop_critical_timings();

	success = !arm_cpuidle_suspend(state_id);

	start_critical_timings();
	update_debug_pc_event(CPU_EXIT, state_id,
			success, 0xdeaffeed, from_idle);

	if (from_idle && cpu->levels[idx].use_bc_timer)
		tick_broadcast_exit();

	return success;
}

static int lpm_cpuidle_select(struct cpuidle_driver *drv,
		struct cpuidle_device *dev)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);

	if (!cpu)
		return 0;

	return cpu_power_select(dev, cpu);
}

static void update_history(struct cpuidle_device *dev, int idx)
{
	struct lpm_history *history = &per_cpu(hist, dev->cpu);
	uint32_t tmr = 0;

	if (!lpm_prediction)
		return;

	if (history->htmr_wkup) {
		if (!history->hptr)
			history->hptr = MAXSAMPLES-1;
		else
			history->hptr--;

		history->resi[history->hptr] += dev->last_residency;
		history->htmr_wkup = 0;
		tmr = 1;
	} else
		history->resi[history->hptr] = dev->last_residency;

	history->mode[history->hptr] = idx;

	trace_cpu_pred_hist(history->mode[history->hptr],
		history->resi[history->hptr], history->hptr, tmr);

	if (history->nsamp < MAXSAMPLES)
		history->nsamp++;

	(history->hptr)++;
	if (history->hptr >= MAXSAMPLES)
		history->hptr = 0;
}

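/*
 * lpm_cpuidle_enter - cpuidle .enter hook: votes the CPU (and possibly
 * its cluster) into the chosen level, goes to sleep through PSCI, then
 * unwinds the votes and records the measured residency in the history.
 */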
static int lpm_cpuidle_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int idx)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
	bool success = false;
	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
	int64_t start_time = ktime_to_ns(ktime_get()), end_time;
	struct power_params *pwr_params;

	pwr_params = &cpu->levels[idx].pwr;

	cpu_prepare(cpu, idx, true);
	cluster_prepare(cpu->parent, cpumask, idx, true, start_time);

	trace_cpu_idle_enter(idx);
	lpm_stats_cpu_enter(idx, start_time);

	if (need_resched())
		goto exit;

	success = psci_enter_sleep(cpu, idx, true);

exit:
	end_time = ktime_to_ns(ktime_get());
	lpm_stats_cpu_exit(idx, end_time, success);

	cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
	cpu_unprepare(cpu, idx, true);
	sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
	end_time = ktime_to_ns(ktime_get()) - start_time;
	do_div(end_time, 1000);
	dev->last_residency = end_time;
	update_history(dev, idx);
	trace_cpu_idle_exit(idx, success);
	local_irq_enable();
	if (lpm_prediction) {
		histtimer_cancel();
		clusttimer_cancel();
	}
	return idx;
}

#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
static int cpuidle_register_cpu(struct cpuidle_driver *drv,
		struct cpumask *mask)
{
	struct cpuidle_device *device;
	int cpu, ret;

	if (!mask || !drv)
		return -EINVAL;

	drv->cpumask = mask;
	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("Failed to register cpuidle driver %d\n", ret);
		goto failed_driver_register;
	}

	for_each_cpu(cpu, mask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

		ret = cpuidle_register_device(device);
		if (ret) {
			pr_err("Failed to register cpuidle driver for cpu:%u\n",
					cpu);
			goto failed_driver_register;
		}
	}
	return ret;
failed_driver_register:
	for_each_cpu(cpu, mask)
		cpuidle_unregister_driver(drv);
	return ret;
}
#else
static int cpuidle_register_cpu(struct cpuidle_driver *drv,
		struct cpumask *mask)
{
	return cpuidle_register(drv, NULL);
}
#endif

static struct cpuidle_governor lpm_governor = {
	.name = "qcom",
	.rating = 30,
	.select = lpm_cpuidle_select,
	.owner = THIS_MODULE,
};

static int cluster_cpuidle_register(struct lpm_cluster *cl)
{
	int i = 0, ret = 0;
	unsigned int cpu;
	struct lpm_cluster *p = NULL;
	struct lpm_cpu *lpm_cpu;

	if (list_empty(&cl->cpu)) {
		struct lpm_cluster *n;

		list_for_each_entry(n, &cl->child, list) {
			ret = cluster_cpuidle_register(n);
			if (ret)
				break;
		}
		return ret;
	}

	list_for_each_entry(lpm_cpu, &cl->cpu, list) {
		lpm_cpu->drv = kcalloc(1, sizeof(*lpm_cpu->drv), GFP_KERNEL);
		if (!lpm_cpu->drv)
			return -ENOMEM;

		lpm_cpu->drv->name = "msm_idle";

		for (i = 0; i < lpm_cpu->nlevels; i++) {
			struct cpuidle_state *st = &lpm_cpu->drv->states[i];
			struct lpm_cpu_level *cpu_level = &lpm_cpu->levels[i];

			snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
			snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
			st->flags = 0;
			st->exit_latency = cpu_level->pwr.latency_us;
			st->power_usage = cpu_level->pwr.ss_power;
			st->target_residency = 0;
			st->enter = lpm_cpuidle_enter;
		}

		lpm_cpu->drv->state_count = lpm_cpu->nlevels;
		lpm_cpu->drv->safe_state_index = 0;
		for_each_cpu(cpu, &lpm_cpu->related_cpus)
			per_cpu(cpu_lpm, cpu) = lpm_cpu;

		for_each_possible_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			if (per_cpu(cpu_lpm, cpu))
				p = per_cpu(cpu_lpm, cpu)->parent;
			while (p) {
				int j;

				spin_lock(&p->sync_lock);
				cpumask_set_cpu(cpu, &p->num_children_in_sync);
				for (j = 0; j < p->nlevels; j++)
					cpumask_copy(
						&p->levels[j].num_cpu_votes,
						&p->num_children_in_sync);
				spin_unlock(&p->sync_lock);
				p = p->parent;
			}
		}
		ret = cpuidle_register_cpu(lpm_cpu->drv,
					&lpm_cpu->related_cpus);

		if (ret) {
			kfree(lpm_cpu->drv);
			return -ENOMEM;
		}
	}
	return 0;
}

/**
 * init_lpm - initializes the governor
 */
static int __init init_lpm(void)
{
	return cpuidle_register_governor(&lpm_governor);
}

postcore_initcall(init_lpm);

static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
		struct lpm_cluster *parent)
{
	const char **level_name;
	int i;

	level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);

	if (!level_name)
		return;

	for (i = 0; i < cpu->nlevels; i++)
		level_name[i] = cpu->levels[i].name;

	lpm_stats_config_level("cpu", level_name, cpu->nlevels,
			parent->stats, &cpu->related_cpus);

	kfree(level_name);
}

static void register_cluster_lpm_stats(struct lpm_cluster *cl,
		struct lpm_cluster *parent)
{
	const char **level_name;
	struct lpm_cluster *child;
	struct lpm_cpu *cpu;
	int i;

	if (!cl)
		return;

	level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);

	if (!level_name)
		return;

	for (i = 0; i < cl->nlevels; i++)
		level_name[i] = cl->levels[i].level_name;

	cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
			cl->nlevels, parent ? parent->stats : NULL, NULL);

	kfree(level_name);

	list_for_each_entry(cpu, &cl->cpu, list) {
		pr_err("%s()\n", __func__);
		register_cpu_lpm_stats(cpu, cl);
	}
	if (!list_empty(&cl->cpu))
		return;

	list_for_each_entry(child, &cl->child, list)
		register_cluster_lpm_stats(child, cl);
}

static int lpm_suspend_prepare(void)
{
	suspend_in_progress = true;
	lpm_stats_suspend_enter();

	return 0;
}

static void lpm_suspend_wake(void)
{
	suspend_in_progress = false;
	lpm_stats_suspend_exit();
}

static int lpm_suspend_enter(suspend_state_t state)
{
	int cpu = raw_smp_processor_id();
	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);
	struct lpm_cluster *cluster = lpm_cpu->parent;
	const struct cpumask *cpumask = get_cpu_mask(cpu);
	int idx;

	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
		if (lpm_cpu_mode_allow(cpu, idx, false))
			break;
	}
	if (idx < 0) {
		pr_err("Failed suspend\n");
		return 0;
	}
	cpu_prepare(lpm_cpu, idx, false);
	cluster_prepare(cluster, cpumask, idx, false, 0);

	/*
	 * Print the clocks which are enabled during system suspend.
	 * This debug information is useful to know which clocks are
	 * enabled and preventing the system level LPMs (XO and Vmin).
	 */
	clock_debug_print_enabled();

	psci_enter_sleep(lpm_cpu, idx, false);

	cluster_unprepare(cluster, cpumask, idx, false, 0);
	cpu_unprepare(lpm_cpu, idx, false);
	return 0;
}

static const struct platform_suspend_ops lpm_suspend_ops = {
	.enter = lpm_suspend_enter,
	.valid = suspend_valid_only_mem,
	.prepare_late = lpm_suspend_prepare,
	.wake = lpm_suspend_wake,
};

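/*
 * lpm_probe - parses the cluster/CPU level hierarchy from device tree,
 * then registers the suspend ops, history timers, debug ring buffer,
 * cpuidle drivers, hotplug callbacks and sysfs control nodes.
 */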
static int lpm_probe(struct platform_device *pdev)
{
	int ret;
	int size;
	struct kobject *module_kobj = NULL;

	get_online_cpus();
	lpm_root_node = lpm_of_parse_cluster(pdev);

	if (IS_ERR_OR_NULL(lpm_root_node)) {
		pr_err("Failed to probe low power modes\n");
		put_online_cpus();
		return PTR_ERR(lpm_root_node);
	}

	if (print_parsed_dt)
		cluster_dt_walkthrough(lpm_root_node);

	/*
	 * Register the hotplug notifier before the broadcast timer setup
	 * to prevent a race where a broadcast timer might not be set up
	 * for a core. This is a bug in the existing code, but with no
	 * known issues, possibly because of how late lpm_levels gets
	 * initialized.
	 */
	suspend_set_ops(&lpm_suspend_ops);
	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cluster_timer_init(lpm_root_node);

	size = num_dbg_elements * sizeof(struct lpm_debug);
	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
			&lpm_debug_phys, GFP_KERNEL);

	register_cluster_lpm_stats(lpm_root_node, NULL);

	ret = cluster_cpuidle_register(lpm_root_node);
	put_online_cpus();
	if (ret) {
		pr_err("Failed to register with cpuidle framework\n");
		goto failed;
	}

	ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
			"AP_QCOM_SLEEP_STARTING",
			lpm_starting_cpu, lpm_dying_cpu);
	if (ret)
		goto failed;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
		ret = -ENOENT;
		goto failed;
	}

	ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
	if (ret) {
		pr_err("Failed to create cluster level nodes\n");
		goto failed;
	}

	return 0;
failed:
	free_cluster_node(lpm_root_node);
	lpm_root_node = NULL;
	return ret;
}

static const struct of_device_id lpm_mtch_tbl[] = {
	{.compatible = "qcom,lpm-levels"},
	{},
};

static struct platform_driver lpm_driver = {
	.probe = lpm_probe,
	.driver = {
		.name = "lpm-levels",
		.owner = THIS_MODULE,
		.of_match_table = lpm_mtch_tbl,
	},
};

static int __init lpm_levels_module_init(void)
{
	int rc;

	rc = platform_driver_register(&lpm_driver);
	if (rc) {
		pr_info("Error registering %s\n", lpm_driver.driver.name);
		goto fail;
	}

fail:
	return rc;
}
late_initcall(lpm_levels_module_init);