blob: e9a4af0f6648b72688d28ae6b68bd24c21171745 [file] [log] [blame]
/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10#include <linux/kernel.h>
11#include <linux/errno.h>
12#include <linux/smp.h>
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -080013#include <linux/cpu.h>
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080014
15#include <asm/cacheflush.h>
Will Deaconeb504392012-01-20 12:01:12 +010016#include <asm/smp_plat.h>
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -070017#include <asm/vfp.h>
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080018
Pratik Patel5827c832012-05-19 19:24:49 -070019#include <mach/jtag.h>
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -080020#include <mach/msm_rtb.h>
21
Matt Wagantall7cca4642012-02-01 16:43:24 -080022#include "pm.h"
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -070023#include "spm.h"
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080024
/* Release token checked by the dying CPU to detect a genuine wakeup;
 * presumably written by the ARM secondary-boot path — defined elsewhere. */
extern volatile int pen_release;

/* Per-cpu hotplug bookkeeping. */
struct msm_hotplug_device {
	struct completion cpu_killed;	/* signalled by the dying CPU in platform_cpu_die() */
	unsigned int warm_boot;		/* zero until this CPU's first online; gates warm-boot restore */
};

/* CPUs flagged at CPU_DYING whose shutdown platform_cpu_kill() must await. */
static cpumask_t cpu_dying_mask;

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_hotplug_device,
			msm_hotplug_devices);
37
/*
 * Prepare this CPU for its low power/offline state.
 *
 * All we can do on msm is flush the caches; taking the CPU out of
 * the coherency domain is not yet supported here.
 */
static inline void cpu_enter_lowpower(void)
{
	flush_cache_all();
}
44
static inline void cpu_leave_lowpower(void)
{
	/* Nothing to undo: cpu_enter_lowpower() only flushed the caches. */
}
48
/*
 * Park the dying CPU in low power until a proper wakeup arrives.
 *
 * Runs on the dying CPU with IRQs off and caches flushed, i.e. while
 * running incoherently — so no printk() or other coherent-state calls
 * are allowed here.  A wakeup is "proper" only when pen_release has
 * been set to this CPU's logical map entry; any other exit from low
 * power is counted into *spurious for the caller to report later.
 */
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	/* Just enter wfi for now. TODO: Properly shut off the cpu. */
	for (;;) {

		msm_pm_cpu_enter_lowpower(cpu);
		if (pen_release == cpu_logical_map(cpu)) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * The trouble is, letting people know about this is not really
		 * possible, since we are currently running incoherently, and
		 * therefore cannot safely call printk() or anything else
		 */
		(*spurious)++;
	}
}
73
74int platform_cpu_kill(unsigned int cpu)
75{
Mahesh Sivasubramanian32b9d422013-05-20 13:50:16 -060076 int ret = 0;
Anji Jonnala02dac8d2013-03-06 21:31:04 +053077
Mahesh Sivasubramanian32b9d422013-05-20 13:50:16 -060078 if (cpumask_test_and_clear_cpu(cpu, &cpu_dying_mask))
79 ret = msm_pm_wait_cpu_shutdown(cpu);
80
81 return ret ? 0 : 1;
Jeff Ohlstein9f1890a2010-12-02 12:11:27 -080082}
83
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled, on the CPU that is going offline.
 */
void platform_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	/* We must be executing on the CPU we are supposed to kill. */
	if (unlikely(cpu != smp_processor_id())) {
		pr_crit("%s: running on %u, should be %u\n",
			__func__, smp_processor_id(), cpu);
		BUG();
	}
	/* Signal that this CPU reached its death path.
	 * NOTE(review): nothing visible in this file waits on cpu_killed
	 * any more (platform_cpu_kill() uses msm_pm_wait_cpu_shutdown());
	 * confirm whether this completion is still consumed elsewhere. */
	complete(&__get_cpu_var(msm_hotplug_devices).cpu_killed);
	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu, &spurious);

	/* Only reached on a proper wakeup (pen_release matched this cpu). */
	pr_debug("CPU%u: %s: normal wakeup\n", cpu, __func__);
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
111
/*
 * Veto hot-unplug of CPUs that must remain online.
 *
 * CPU 0 may never be shut down — it is still too special (e.g. clock
 * tick interrupts), so refuse with -EPERM; every other CPU is allowed.
 */
int platform_cpu_disable(unsigned int cpu)
{
	if (cpu == 0)
		return -EPERM;

	return 0;
}
Mahesh Sivasubramaniand23add12011-11-18 14:30:11 -0700120
/*
 * Layout of the word passed to uncached_logk() for hotplug events:
 *   bits [3:0]  — the cpu number being changed (CPU_OF)
 *   bits [19:4] — a snapshot of the online cpu mask (CPUSET_OF)
 * This encoding supports at most 16 cpus.
 */
#define CPU_SHIFT	0
#define CPU_MASK	0xF
#define CPU_OF(n)	(((n) & CPU_MASK) << CPU_SHIFT)
#define CPUSET_SHIFT	4
#define CPUSET_MASK	0xFFFF
#define CPUSET_OF(n)	(((n) & CPUSET_MASK) << CPUSET_SHIFT)
127
/*
 * CPU hotplug notifier: records online/offline transitions in the
 * uncached RTB log and flags dying CPUs in cpu_dying_mask so that
 * platform_cpu_kill() knows to wait for their shutdown.
 */
static int hotplug_rtb_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	/*
	 * Bits [19:4] of the data are the online mask, lower 4 bits are the
	 * cpu number that is being changed. Additionally, changes to the
	 * online_mask that will be done by the current hotplug will be made
	 * even though they aren't necessarily in the online mask yet.
	 *
	 * XXX: This design is limited to supporting at most 16 cpus
	 */
	/* hcpu carries the cpu number encoded in the pointer value. */
	int this_cpumask = CPUSET_OF(1 << (int)hcpu);
	int cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
	int cpudata = CPU_OF((int)hcpu) | cpumask;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		/* CPU coming up: log the mask with this cpu already set. */
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
		break;
	case CPU_DYING:
		/* Flag this cpu so platform_cpu_kill() awaits its shutdown. */
		cpumask_set_cpu((unsigned long)hcpu, &cpu_dying_mask);
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
static struct notifier_block hotplug_rtb_notifier = {
	.notifier_call = hotplug_rtb_callback,
};
160
/*
 * Per-cpu init run when a secondary CPU comes online.
 *
 * On a CPU's very first (cold) boot, only mark it warm-bootable and
 * set up its cpu_killed completion.  On subsequent (warm) boots after
 * a hotplug or power collapse, restore JTAG state, resume VFP where
 * configured, and return the SPM to clock-gating mode.
 *
 * Returns 0 on the cold-boot path, otherwise the SPM call's result.
 */
int msm_platform_secondary_init(unsigned int cpu)
{
	int ret;
	struct msm_hotplug_device *dev = &__get_cpu_var(msm_hotplug_devices);

	if (!dev->warm_boot) {
		dev->warm_boot = 1;
		init_completion(&dev->cpu_killed);
		return 0;
	}
	msm_jtag_restore_state();
#if defined(CONFIG_VFP) && defined (CONFIG_CPU_PM)
	vfp_pm_resume();
#endif
	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);

	return ret;
}
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -0800179
Steve Mucklef132c6c2012-06-06 18:30:57 -0700180static int __init init_hotplug(void)
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -0800181{
Steve Mucklef132c6c2012-06-06 18:30:57 -0700182
183 struct msm_hotplug_device *dev = &__get_cpu_var(msm_hotplug_devices);
184 init_completion(&dev->cpu_killed);
Jeff Ohlsteinc05defe2012-01-31 19:53:16 -0800185 return register_hotcpu_notifier(&hotplug_rtb_notifier);
186}
Steve Mucklef132c6c2012-06-06 18:30:57 -0700187early_initcall(init_hotplug);