blob: 174a50acf61b2d0667123b13390556da1be4b2d8 [file] [log] [blame]
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -08001/*
2 * Copyright (C) 2002 ARM Ltd.
3 * All Rights Reserved
Mahesh Sivasubramanian32b9d422013-05-20 13:50:16 -06004 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -08005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/kernel.h>
11#include <linux/errno.h>
12#include <linux/smp.h>
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -080013#include <linux/cpu.h>
Matt Wagantall242fec52013-11-26 18:16:24 -080014#include <linux/ratelimit.h>
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080015
16#include <asm/cacheflush.h>
Will Deaconeb504392012-01-20 12:01:12 +010017#include <asm/smp_plat.h>
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -070018#include <asm/vfp.h>
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080019
Pratik Patel5827c832012-05-19 19:24:49 -070020#include <mach/jtag.h>
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -080021#include <mach/msm_rtb.h>
22
Matt Wagantall7cca4642012-02-01 16:43:24 -080023#include "pm.h"
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -070024#include "spm.h"
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080025
/*
 * Wakeup handshake word shared with the boot path: platform_do_lowpower()
 * treats a write of this CPU's hardware id here as a genuine wakeup.
 */
extern volatile int pen_release;

/*
 * CPUs that have passed through CPU_DYING; platform_cpu_kill() tests and
 * clears a CPU's bit to decide whether it must wait for SPM shutdown.
 */
static cpumask_t cpu_dying_mask;

/* Per-CPU flag: zero until the CPU's first (cold) boot has completed. */
static DEFINE_PER_CPU(unsigned int, warm_boot_flag);
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -070031
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080032static inline void cpu_enter_lowpower(void)
33{
34 /* Just flush the cache. Changing the coherency is not yet
35 * available on msm. */
36 flush_cache_all();
37}
38
static inline void cpu_leave_lowpower(void)
{
	/* Nothing to undo: cpu_enter_lowpower() only flushed the caches. */
}
42
Stephen Boydf5e90822012-08-08 13:36:15 -070043static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080044{
45 /* Just enter wfi for now. TODO: Properly shut off the cpu. */
46 for (;;) {
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080047
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -070048 msm_pm_cpu_enter_lowpower(cpu);
Will Deacon1d3cfb32011-08-09 12:02:27 +010049 if (pen_release == cpu_logical_map(cpu)) {
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080050 /*
51 * OK, proper wakeup, we're done
52 */
53 break;
54 }
55
56 /*
57 * getting here, means that we have come out of WFI without
58 * having been woken up - this shouldn't happen
59 *
60 * The trouble is, letting people know about this is not really
61 * possible, since we are currently running incoherently, and
62 * therefore cannot safely call printk() or anything else
63 */
Stephen Boydf5e90822012-08-08 13:36:15 -070064 (*spurious)++;
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080065 }
66}
67
68int platform_cpu_kill(unsigned int cpu)
69{
Mahesh Sivasubramanian32b9d422013-05-20 13:50:16 -060070 int ret = 0;
Anji Jonnala02dac8d2013-03-06 21:31:04 +053071
Mahesh Sivasubramanian32b9d422013-05-20 13:50:16 -060072 if (cpumask_test_and_clear_cpu(cpu, &cpu_dying_mask))
73 ret = msm_pm_wait_cpu_shutdown(cpu);
74
75 return ret ? 0 : 1;
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080076}
77
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	unsigned int this_cpu = smp_processor_id();

	/* We must be executing on the CPU that is being torn down. */
	if (unlikely(cpu != this_cpu)) {
		pr_crit("%s: running on %u, should be %u\n",
			__func__, this_cpu, cpu);
		BUG();
	}

	/* Ready for shutdown: flush caches, then park in low power. */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu, &spurious);

	/* We return here only on a proper wakeup. */
	pr_debug("CPU%u: %s: normal wakeup\n", cpu, __func__);
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
104
/*
 * Decide whether @cpu may be taken offline. Returns 0 if allowed,
 * -EPERM otherwise.
 */
int platform_cpu_disable(unsigned int cpu)
{
	/*
	 * We don't allow CPU 0 to be shutdown (it is still too special,
	 * e.g. clock tick interrupts).
	 */
	if (cpu == 0)
		return -EPERM;

	return 0;
}
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -0700113
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -0800114#define CPU_SHIFT 0
115#define CPU_MASK 0xF
116#define CPU_OF(n) (((n) & CPU_MASK) << CPU_SHIFT)
117#define CPUSET_SHIFT 4
118#define CPUSET_MASK 0xFFFF
119#define CPUSET_OF(n) (((n) & CPUSET_MASK) << CPUSET_SHIFT)
120
121static int hotplug_rtb_callback(struct notifier_block *nfb,
122 unsigned long action, void *hcpu)
123{
124 /*
125 * Bits [19:4] of the data are the online mask, lower 4 bits are the
126 * cpu number that is being changed. Additionally, changes to the
127 * online_mask that will be done by the current hotplug will be made
128 * even though they aren't necessarily in the online mask yet.
129 *
130 * XXX: This design is limited to supporting at most 16 cpus
131 */
132 int this_cpumask = CPUSET_OF(1 << (int)hcpu);
133 int cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
134 int cpudata = CPU_OF((int)hcpu) | cpumask;
135
136 switch (action & (~CPU_TASKS_FROZEN)) {
137 case CPU_STARTING:
138 uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
139 break;
140 case CPU_DYING:
Mahesh Sivasubramanian32b9d422013-05-20 13:50:16 -0600141 cpumask_set_cpu((unsigned long)hcpu, &cpu_dying_mask);
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -0800142 uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
143 break;
144 default:
145 break;
146 }
147
148 return NOTIFY_OK;
149}
/* Registered in init_hotplug(); logs hotplug transitions to the RTB. */
static struct notifier_block hotplug_rtb_notifier = {
	.notifier_call = hotplug_rtb_callback,
};
153
Matt Wagantall242fec52013-11-26 18:16:24 -0800154static int hotplug_cpu_check_callback(struct notifier_block *nfb,
155 unsigned long action, void *hcpu)
156{
157 int cpu = (int)hcpu;
158
159 switch (action & (~CPU_TASKS_FROZEN)) {
160 case CPU_DOWN_PREPARE:
161 if (cpu == 0) {
162 pr_err_ratelimited("CPU0 hotplug is not supported\n");
163 return NOTIFY_BAD;
164 }
165 break;
166 default:
167 break;
168 }
169
170 return NOTIFY_OK;
171}
/* Highest-priority notifier so the CPU0 veto runs before other callbacks. */
static struct notifier_block hotplug_cpu_check_notifier = {
	.notifier_call = hotplug_cpu_check_callback,
	.priority = INT_MAX,
};
176
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -0700177int msm_platform_secondary_init(unsigned int cpu)
178{
179 int ret;
Venkat Devarasetty5cf37692013-02-13 23:01:06 +0530180 unsigned int *warm_boot = &__get_cpu_var(warm_boot_flag);
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -0700181
Venkat Devarasetty5cf37692013-02-13 23:01:06 +0530182 if (!(*warm_boot)) {
183 *warm_boot = 1;
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -0700184 return 0;
185 }
Pratik Patel17f3b822011-11-21 12:41:47 -0800186 msm_jtag_restore_state();
Steve Mucklef132c6c2012-06-06 18:30:57 -0700187#if defined(CONFIG_VFP) && defined (CONFIG_CPU_PM)
188 vfp_pm_resume();
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -0700189#endif
190 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
191
192 return ret;
193}
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -0800194
Steve Mucklef132c6c2012-06-06 18:30:57 -0700195static int __init init_hotplug(void)
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -0800196{
Matt Wagantall242fec52013-11-26 18:16:24 -0800197 int rc;
198
199 rc = register_hotcpu_notifier(&hotplug_rtb_notifier);
200 if (rc)
201 return rc;
202
203 return register_hotcpu_notifier(&hotplug_cpu_check_notifier);
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -0800204}
Steve Mucklef132c6c2012-06-06 18:30:57 -0700205early_initcall(init_hotplug);