Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2002 ARM Ltd. |
| 3 | * All Rights Reserved |
Mahesh Sivasubramanian | 32b9d42 | 2013-05-20 13:50:16 -0600 | [diff] [blame^] | 4 | * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. |
| 9 | */ |
| 10 | #include <linux/kernel.h> |
| 11 | #include <linux/errno.h> |
| 12 | #include <linux/smp.h> |
Jeff Ohlstein | c05defe | 2012-01-31 19:53:16 -0800 | [diff] [blame] | 13 | #include <linux/cpu.h> |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 14 | |
| 15 | #include <asm/cacheflush.h> |
Will Deacon | eb50439 | 2012-01-20 12:01:12 +0100 | [diff] [blame] | 16 | #include <asm/smp_plat.h> |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 17 | #include <asm/vfp.h> |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 18 | |
Pratik Patel | 5827c83 | 2012-05-19 19:24:49 -0700 | [diff] [blame] | 19 | #include <mach/jtag.h> |
Jeff Ohlstein | c05defe | 2012-01-31 19:53:16 -0800 | [diff] [blame] | 20 | #include <mach/msm_rtb.h> |
| 21 | |
Matt Wagantall | 7cca464 | 2012-02-01 16:43:24 -0800 | [diff] [blame] | 22 | #include "pm.h" |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 23 | #include "spm.h" |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 24 | |
/*
 * Holding pen release token (defined in platform SMP boot code).  A dying
 * CPU spins in low power until this matches its logical map id — see
 * platform_do_lowpower().
 */
extern volatile int pen_release;

/* Per-cpu hotplug bookkeeping. */
struct msm_hotplug_device {
	struct completion cpu_killed;	/* completed by the dying CPU in platform_cpu_die() */
	unsigned int warm_boot;		/* 0 until the CPU's first online; 1 on later warm boots */
};


/*
 * CPUs that have passed CPU_DYING but whose shutdown has not yet been
 * confirmed by platform_cpu_kill().
 */
static cpumask_t cpu_dying_mask;

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_hotplug_device,
	msm_hotplug_devices);
| 37 | |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 38 | static inline void cpu_enter_lowpower(void) |
| 39 | { |
| 40 | /* Just flush the cache. Changing the coherency is not yet |
| 41 | * available on msm. */ |
| 42 | flush_cache_all(); |
| 43 | } |
| 44 | |
/* Counterpart of cpu_enter_lowpower(); nothing to undo on msm. */
static inline void cpu_leave_lowpower(void)
{
}
| 48 | |
Stephen Boyd | f5e9082 | 2012-08-08 13:36:15 -0700 | [diff] [blame] | 49 | static inline void platform_do_lowpower(unsigned int cpu, int *spurious) |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 50 | { |
| 51 | /* Just enter wfi for now. TODO: Properly shut off the cpu. */ |
| 52 | for (;;) { |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 53 | |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 54 | msm_pm_cpu_enter_lowpower(cpu); |
Will Deacon | 1d3cfb3 | 2011-08-09 12:02:27 +0100 | [diff] [blame] | 55 | if (pen_release == cpu_logical_map(cpu)) { |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 56 | /* |
| 57 | * OK, proper wakeup, we're done |
| 58 | */ |
| 59 | break; |
| 60 | } |
| 61 | |
| 62 | /* |
| 63 | * getting here, means that we have come out of WFI without |
| 64 | * having been woken up - this shouldn't happen |
| 65 | * |
| 66 | * The trouble is, letting people know about this is not really |
| 67 | * possible, since we are currently running incoherently, and |
| 68 | * therefore cannot safely call printk() or anything else |
| 69 | */ |
Stephen Boyd | f5e9082 | 2012-08-08 13:36:15 -0700 | [diff] [blame] | 70 | (*spurious)++; |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 71 | } |
| 72 | } |
| 73 | |
| 74 | int platform_cpu_kill(unsigned int cpu) |
| 75 | { |
Mahesh Sivasubramanian | 32b9d42 | 2013-05-20 13:50:16 -0600 | [diff] [blame^] | 76 | int ret = 0; |
Anji Jonnala | 02dac8d | 2013-03-06 21:31:04 +0530 | [diff] [blame] | 77 | |
Mahesh Sivasubramanian | 32b9d42 | 2013-05-20 13:50:16 -0600 | [diff] [blame^] | 78 | if (cpumask_test_and_clear_cpu(cpu, &cpu_dying_mask)) |
| 79 | ret = msm_pm_wait_cpu_shutdown(cpu); |
| 80 | |
| 81 | return ret ? 0 : 1; |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 82 | } |
| 83 | |
| 84 | /* |
| 85 | * platform-specific code to shutdown a CPU |
| 86 | * |
| 87 | * Called with IRQs disabled |
| 88 | */ |
| 89 | void platform_cpu_die(unsigned int cpu) |
| 90 | { |
Stephen Boyd | f5e9082 | 2012-08-08 13:36:15 -0700 | [diff] [blame] | 91 | int spurious = 0; |
| 92 | |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 93 | if (unlikely(cpu != smp_processor_id())) { |
| 94 | pr_crit("%s: running on %u, should be %u\n", |
| 95 | __func__, smp_processor_id(), cpu); |
| 96 | BUG(); |
| 97 | } |
| 98 | complete(&__get_cpu_var(msm_hotplug_devices).cpu_killed); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 99 | /* |
| 100 | * we're ready for shutdown now, so do it |
| 101 | */ |
| 102 | cpu_enter_lowpower(); |
Stephen Boyd | f5e9082 | 2012-08-08 13:36:15 -0700 | [diff] [blame] | 103 | platform_do_lowpower(cpu, &spurious); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 104 | |
Steve Muckle | f132c6c | 2012-06-06 18:30:57 -0700 | [diff] [blame] | 105 | pr_debug("CPU%u: %s: normal wakeup\n", cpu, __func__); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 106 | cpu_leave_lowpower(); |
Stephen Boyd | f5e9082 | 2012-08-08 13:36:15 -0700 | [diff] [blame] | 107 | |
| 108 | if (spurious) |
| 109 | pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 110 | } |
| 111 | |
/*
 * Decide whether a CPU may be hot-unplugged.
 *
 * CPU 0 may not be shut down (it is still too special, e.g. for clock
 * tick interrupts), so it is refused with -EPERM; any other CPU is
 * allowed to go down.
 */
int platform_cpu_disable(unsigned int cpu)
{
	if (cpu == 0)
		return -EPERM;

	return 0;
}
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 120 | |
Jeff Ohlstein | c05defe | 2012-01-31 19:53:16 -0800 | [diff] [blame] | 121 | #define CPU_SHIFT 0 |
| 122 | #define CPU_MASK 0xF |
| 123 | #define CPU_OF(n) (((n) & CPU_MASK) << CPU_SHIFT) |
| 124 | #define CPUSET_SHIFT 4 |
| 125 | #define CPUSET_MASK 0xFFFF |
| 126 | #define CPUSET_OF(n) (((n) & CPUSET_MASK) << CPUSET_SHIFT) |
| 127 | |
| 128 | static int hotplug_rtb_callback(struct notifier_block *nfb, |
| 129 | unsigned long action, void *hcpu) |
| 130 | { |
| 131 | /* |
| 132 | * Bits [19:4] of the data are the online mask, lower 4 bits are the |
| 133 | * cpu number that is being changed. Additionally, changes to the |
| 134 | * online_mask that will be done by the current hotplug will be made |
| 135 | * even though they aren't necessarily in the online mask yet. |
| 136 | * |
| 137 | * XXX: This design is limited to supporting at most 16 cpus |
| 138 | */ |
| 139 | int this_cpumask = CPUSET_OF(1 << (int)hcpu); |
| 140 | int cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]); |
| 141 | int cpudata = CPU_OF((int)hcpu) | cpumask; |
| 142 | |
| 143 | switch (action & (~CPU_TASKS_FROZEN)) { |
| 144 | case CPU_STARTING: |
| 145 | uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask)); |
| 146 | break; |
| 147 | case CPU_DYING: |
Mahesh Sivasubramanian | 32b9d42 | 2013-05-20 13:50:16 -0600 | [diff] [blame^] | 148 | cpumask_set_cpu((unsigned long)hcpu, &cpu_dying_mask); |
Jeff Ohlstein | c05defe | 2012-01-31 19:53:16 -0800 | [diff] [blame] | 149 | uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask)); |
| 150 | break; |
| 151 | default: |
| 152 | break; |
| 153 | } |
| 154 | |
| 155 | return NOTIFY_OK; |
| 156 | } |
/* Registered at early init to trace hotplug transitions into the RTB. */
static struct notifier_block hotplug_rtb_notifier = {
	.notifier_call = hotplug_rtb_callback,
};
| 160 | |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 161 | int msm_platform_secondary_init(unsigned int cpu) |
| 162 | { |
| 163 | int ret; |
| 164 | struct msm_hotplug_device *dev = &__get_cpu_var(msm_hotplug_devices); |
| 165 | |
| 166 | if (!dev->warm_boot) { |
| 167 | dev->warm_boot = 1; |
| 168 | init_completion(&dev->cpu_killed); |
| 169 | return 0; |
| 170 | } |
Pratik Patel | 17f3b82 | 2011-11-21 12:41:47 -0800 | [diff] [blame] | 171 | msm_jtag_restore_state(); |
Steve Muckle | f132c6c | 2012-06-06 18:30:57 -0700 | [diff] [blame] | 172 | #if defined(CONFIG_VFP) && defined (CONFIG_CPU_PM) |
| 173 | vfp_pm_resume(); |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 174 | #endif |
| 175 | ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false); |
| 176 | |
| 177 | return ret; |
| 178 | } |
Jeff Ohlstein | c05defe | 2012-01-31 19:53:16 -0800 | [diff] [blame] | 179 | |
Steve Muckle | f132c6c | 2012-06-06 18:30:57 -0700 | [diff] [blame] | 180 | static int __init init_hotplug(void) |
Jeff Ohlstein | c05defe | 2012-01-31 19:53:16 -0800 | [diff] [blame] | 181 | { |
Steve Muckle | f132c6c | 2012-06-06 18:30:57 -0700 | [diff] [blame] | 182 | |
| 183 | struct msm_hotplug_device *dev = &__get_cpu_var(msm_hotplug_devices); |
| 184 | init_completion(&dev->cpu_killed); |
Jeff Ohlstein | c05defe | 2012-01-31 19:53:16 -0800 | [diff] [blame] | 185 | return register_hotcpu_notifier(&hotplug_rtb_notifier); |
| 186 | } |
Steve Muckle | f132c6c | 2012-06-06 18:30:57 -0700 | [diff] [blame] | 187 | early_initcall(init_hotplug); |