/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/ratelimit.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/vfp.h>

#include <mach/jtag.h>
#include <mach/msm_rtb.h>

#include "pm.h"
#include "spm.h"

extern volatile int pen_release;

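/*
 * Tracks CPUs that have passed through the CPU_DYING notifier but have not
 * yet been confirmed down by platform_cpu_kill() below.
 */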
static cpumask_t cpu_dying_mask;

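/*
 * Per-cpu flag: zero on the first (cold) boot of a secondary CPU, set once
 * so that later warm boots out of low power take the restore path in
 * msm_platform_secondary_init().
 */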
static DEFINE_PER_CPU(unsigned int, warm_boot_flag);

static inline void cpu_enter_lowpower(void)
{
	/*
	 * Just flush the cache. Changing the coherency is not yet
	 * available on msm.
	 */
	flush_cache_all();
}

static inline void cpu_leave_lowpower(void)
{
}

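/*
 * Spin in low power until a genuine online request arrives. pen_release is
 * expected to be written with this CPU's hardware ID (cpu_logical_map())
 * by the CPU running boot_secondary() when it wants this CPU back online.
 */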
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	/* Just enter wfi for now. TODO: Properly shut off the cpu. */
	for (;;) {
		msm_pm_cpu_enter_lowpower(cpu);
		if (pen_release == cpu_logical_map(cpu)) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * The trouble is, letting people know about this is not really
		 * possible, since we are currently running incoherently, and
		 * therefore cannot safely call printk() or anything else.
		 */
		(*spurious)++;
	}
}

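/*
 * Called from the CPU that requested the hotplug to confirm that the victim
 * CPU has shut down; returns 1 when the CPU is confirmed down, 0 on failure.
 */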
int platform_cpu_kill(unsigned int cpu)
{
	int ret = 0;

	if (cpumask_test_and_clear_cpu(cpu, &cpu_dying_mask))
		ret = msm_pm_wait_cpu_shutdown(cpu);

	return ret ? 0 : 1;
}

/*
 * platform-specific code to shut down a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	if (unlikely(cpu != smp_processor_id())) {
		pr_crit("%s: running on %u, should be %u\n",
			__func__, smp_processor_id(), cpu);
		BUG();
	}
	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu, &spurious);

	pr_debug("CPU%u: %s: normal wakeup\n", cpu, __func__);
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}

int platform_cpu_disable(unsigned int cpu)
{
	/*
	 * we don't allow CPU 0 to be shut down (it is still too special,
	 * e.g. clock tick interrupts)
	 */
	return cpu == 0 ? -EPERM : 0;
}

#define CPU_SHIFT	0
#define CPU_MASK	0xF
#define CPU_OF(n)	(((n) & CPU_MASK) << CPU_SHIFT)
#define CPUSET_SHIFT	4
#define CPUSET_MASK	0xFFFF
#define CPUSET_OF(n)	(((n) & CPUSET_MASK) << CPUSET_SHIFT)

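/*
 * Worked example of the encoding used by hotplug_rtb_callback() below,
 * assuming CPUs 0 and 1 are online when CPU 2 hits CPU_STARTING:
 *
 *   cpudata      = CPU_OF(2) | CPUSET_OF(0x3)  = 0x02 | 0x30 = 0x32
 *   logged value = cpudata   | CPUSET_OF(1<<2) = 0x32 | 0x40 = 0x72
 *
 * i.e. the low nibble names the CPU being changed and bits [19:4] hold the
 * online mask with the in-flight change already applied.
 */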
static int hotplug_rtb_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	/*
	 * Bits [19:4] of the data are the online mask, lower 4 bits are the
	 * cpu number that is being changed. Additionally, changes to the
	 * online_mask that will be done by the current hotplug will be made
	 * even though they aren't necessarily in the online mask yet.
	 *
	 * XXX: This design is limited to supporting at most 16 cpus
	 */
	int this_cpumask = CPUSET_OF(1 << (int)hcpu);
	int cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
	int cpudata = CPU_OF((int)hcpu) | cpumask;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
		break;
	case CPU_DYING:
		cpumask_set_cpu((unsigned long)hcpu, &cpu_dying_mask);
		uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block hotplug_rtb_notifier = {
	.notifier_call = hotplug_rtb_callback,
};

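/*
 * Registered at INT_MAX priority so this check runs ahead of other
 * CPU_DOWN_PREPARE handlers and can veto CPU0 hot-unplug with a
 * rate-limited error before any teardown starts.
 */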
static int hotplug_cpu_check_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	int cpu = (int)hcpu;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_DOWN_PREPARE:
		if (cpu == 0) {
			pr_err_ratelimited("CPU0 hotplug is not supported\n");
			return NOTIFY_BAD;
		}
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block hotplug_cpu_check_notifier = {
	.notifier_call = hotplug_cpu_check_callback,
	.priority = INT_MAX,
};

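/*
 * Called from the secondary CPU bring-up path. The first (cold) boot only
 * records the warm-boot flag; subsequent warm boots out of a low power state
 * restore JTAG and VFP state and return the SPM to clock gating.
 */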
int msm_platform_secondary_init(unsigned int cpu)
{
	int ret;
	unsigned int *warm_boot = &__get_cpu_var(warm_boot_flag);

	if (!(*warm_boot)) {
		*warm_boot = 1;
		return 0;
	}
	msm_jtag_restore_state();
#if defined(CONFIG_VFP) && defined(CONFIG_CPU_PM)
	vfp_pm_resume();
#endif
	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);

	return ret;
}

static int __init init_hotplug(void)
{
	int rc;

	rc = register_hotcpu_notifier(&hotplug_rtb_notifier);
	if (rc)
		return rc;

	return register_hotcpu_notifier(&hotplug_cpu_check_notifier);
}
early_initcall(init_hotplug);