/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/vfp.h>

#include <mach/qdss.h>
#include <mach/msm_rtb.h>

#include "pm.h"
#include "spm.h"

extern volatile int pen_release;

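/*
 * Per-CPU hotplug bookkeeping: cpu_killed is completed by the dying CPU in
 * platform_cpu_die() and waited on in platform_cpu_kill(); warm_boot
 * distinguishes the first (cold) boot of a secondary CPU from a hotplug
 * re-entry in msm_platform_secondary_init().
 */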
struct msm_hotplug_device {
	struct completion cpu_killed;
	unsigned int warm_boot;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_hotplug_device,
		msm_hotplug_devices);

static inline void cpu_enter_lowpower(void)
{
	/* Just flush the cache. Changing the coherency is not yet
	 * available on msm. */
	flush_cache_all();
}

static inline void cpu_leave_lowpower(void)
{
}

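/*
 * Park the dying CPU: repeatedly drop into the platform low-power state and,
 * on each wakeup, check whether the booting CPU has written our number into
 * pen_release. Only then is the wakeup genuine and the CPU allowed to return
 * and be brought back online; anything else is treated as spurious.
 */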
static inline void platform_do_lowpower(unsigned int cpu)
{
	/* Just enter wfi for now. TODO: Properly shut off the cpu. */
	for (;;) {
		msm_pm_cpu_enter_lowpower(cpu);
		if (pen_release == cpu) {
			/*
			 * OK, proper wakeup, we're done
			 */
			pen_release = -1;
			dmac_flush_range((void *)&pen_release,
				(void *)(&pen_release + 1));
			break;
		}

		/*
		 * Getting here means we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * The trouble is, letting people know about this is not really
		 * possible, since we are currently running incoherently, and
		 * therefore cannot safely call printk() or anything else.
		 */
		dmac_inv_range((void *)&pen_release,
			(void *)(&pen_release + 1));
		pr_debug("CPU%u: spurious wakeup call\n", cpu);
	}
}

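/*
 * Runs on a surviving CPU: wait up to five seconds for the dying CPU to
 * signal cpu_killed from platform_cpu_die(), then fall back to
 * msm_pm_wait_cpu_shutdown() to confirm the core has actually gone down.
 */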
int platform_cpu_kill(unsigned int cpu)
{
	struct completion *killed =
		&per_cpu(msm_hotplug_devices, cpu).cpu_killed;
	int ret;

	ret = wait_for_completion_timeout(killed, HZ * 5);
	if (ret)
		return ret;

	return msm_pm_wait_cpu_shutdown(cpu);
}

/*
 * platform-specific code to shut down a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
	if (unlikely(cpu != smp_processor_id())) {
		pr_crit("%s: running on %u, should be %u\n",
			__func__, smp_processor_id(), cpu);
		BUG();
	}
	complete(&__get_cpu_var(msm_hotplug_devices).cpu_killed);
	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu);

	pr_notice("CPU%u: %s: normal wakeup\n", cpu, __func__);
	cpu_leave_lowpower();
}

int platform_cpu_disable(unsigned int cpu)
{
	/*
	 * we don't allow CPU 0 to be shut down (it is still too special,
	 * e.g. clock tick interrupts)
	 */
	return cpu == 0 ? -EPERM : 0;
}

#define CPU_SHIFT	0
#define CPU_MASK	0xF
#define CPU_OF(n)	(((n) & CPU_MASK) << CPU_SHIFT)
#define CPUSET_SHIFT	4
#define CPUSET_MASK	0xFFFF
#define CPUSET_OF(n)	(((n) & CPUSET_MASK) << CPUSET_SHIFT)

static int hotplug_rtb_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	/*
	 * Bits [19:4] of the data are the online mask, the lower 4 bits are
	 * the cpu number that is being changed. Additionally, the change the
	 * current hotplug operation is about to make to the online mask is
	 * folded into the logged value, even though cpu_online_mask may not
	 * reflect it yet.
	 *
	 * XXX: This design is limited to supporting at most 16 cpus
	 */
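	/*
	 * Worked example (hypothetical values): if CPU 1 is starting while
	 * CPU 0 is already online, this_cpumask is CPUSET_OF(1 << 1) = 0x20,
	 * cpumask is CPUSET_OF(0x1) = 0x10, and the CPU_STARTING case logs
	 * cpudata | this_cpumask = CPU_OF(1) | CPUSET_OF(0x3) = 0x31.
	 */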
	int this_cpumask = CPUSET_OF(1 << (int)hcpu);
	int cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
	int cpudata = CPU_OF((int)hcpu) | cpumask;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
		break;
	case CPU_DYING:
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block hotplug_rtb_notifier = {
	.notifier_call = hotplug_rtb_callback,
};

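/*
 * Runs on the secondary CPU as it comes up. The very first (cold) boot only
 * initializes the cpu_killed completion; on a warm boot after hotplug the
 * JTAG/debug and VFP state are restored and the SPM is set back to its
 * default clock-gating low-power mode.
 */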
int msm_platform_secondary_init(unsigned int cpu)
{
	int ret;
	struct msm_hotplug_device *dev = &__get_cpu_var(msm_hotplug_devices);

	if (!dev->warm_boot) {
		dev->warm_boot = 1;
		init_completion(&dev->cpu_killed);
		return 0;
	}
	msm_jtag_restore_state();
#ifdef CONFIG_VFP
	vfp_reinit();
#endif
	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);

	return ret;
}

static int __init init_hotplug_notifier(void)
{
	return register_hotcpu_notifier(&hotplug_rtb_notifier);
}
early_initcall(init_hotplug_notifier);