Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2002 ARM Ltd. |
| 3 | * All Rights Reserved |
Matt Wagantall | 902c05e | 2012-01-31 16:39:22 -0800 | [diff] [blame] | 4 | * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. |
| 9 | */ |
| 10 | #include <linux/kernel.h> |
| 11 | #include <linux/errno.h> |
| 12 | #include <linux/smp.h> |
Jeff Ohlstein | c05defe | 2012-01-31 19:53:16 -0800 | [diff] [blame] | 13 | #include <linux/cpu.h> |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 14 | |
| 15 | #include <asm/cacheflush.h> |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 16 | #include <asm/vfp.h> |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 17 | |
Pratik Patel | 606fb50 | 2012-03-17 22:11:03 -0700 | [diff] [blame] | 18 | #include <mach/qdss.h> |
Jeff Ohlstein | c05defe | 2012-01-31 19:53:16 -0800 | [diff] [blame] | 19 | #include <mach/msm_rtb.h> |
| 20 | |
Matt Wagantall | 7cca464 | 2012-02-01 16:43:24 -0800 | [diff] [blame] | 21 | #include "pm.h" |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 22 | #include "spm.h" |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 23 | |
/*
 * pen_release is defined in the platform SMP boot code (holding pen):
 * it is written by the booting/waking path and polled by the dying CPU
 * in platform_do_lowpower() to detect a proper wakeup.
 */
extern volatile int pen_release;

/* Per-CPU hotplug bookkeeping. */
struct msm_hotplug_device {
	struct completion cpu_killed;	/* signalled by the dying CPU in platform_cpu_die() */
	unsigned int warm_boot;		/* 0 on first (cold) boot of this core, 1 afterwards */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_hotplug_device,
			msm_hotplug_devices);
| 33 | |
/*
 * Prepare this CPU for power-down. MSM cannot yet take the core out of
 * the coherency domain, so the only required step is a full cache flush.
 */
static inline void cpu_enter_lowpower(void)
{
	flush_cache_all();
}
| 40 | |
/*
 * Undo cpu_enter_lowpower() on wakeup. Nothing to do on MSM since
 * coherency was never disabled; kept for symmetry with other platforms.
 */
static inline void cpu_leave_lowpower(void)
{
}
| 44 | |
| 45 | static inline void platform_do_lowpower(unsigned int cpu) |
| 46 | { |
| 47 | /* Just enter wfi for now. TODO: Properly shut off the cpu. */ |
| 48 | for (;;) { |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 49 | |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 50 | msm_pm_cpu_enter_lowpower(cpu); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 51 | if (pen_release == cpu) { |
| 52 | /* |
| 53 | * OK, proper wakeup, we're done |
| 54 | */ |
Jeff Ohlstein | 766ccf6 | 2012-02-07 18:26:02 -0800 | [diff] [blame] | 55 | pen_release = -1; |
| 56 | dmac_flush_range((void *)&pen_release, |
| 57 | (void *)(&pen_release + sizeof(pen_release))); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 58 | break; |
| 59 | } |
| 60 | |
| 61 | /* |
| 62 | * getting here, means that we have come out of WFI without |
| 63 | * having been woken up - this shouldn't happen |
| 64 | * |
| 65 | * The trouble is, letting people know about this is not really |
| 66 | * possible, since we are currently running incoherently, and |
| 67 | * therefore cannot safely call printk() or anything else |
| 68 | */ |
Jeff Ohlstein | 766ccf6 | 2012-02-07 18:26:02 -0800 | [diff] [blame] | 69 | dmac_inv_range((void *)&pen_release, |
| 70 | (void *)(&pen_release + sizeof(pen_release))); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 71 | pr_debug("CPU%u: spurious wakeup call\n", cpu); |
| 72 | } |
| 73 | } |
| 74 | |
| 75 | int platform_cpu_kill(unsigned int cpu) |
| 76 | { |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 77 | struct completion *killed = |
| 78 | &per_cpu(msm_hotplug_devices, cpu).cpu_killed; |
Matt Wagantall | 902c05e | 2012-01-31 16:39:22 -0800 | [diff] [blame] | 79 | int ret; |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 80 | |
Matt Wagantall | 902c05e | 2012-01-31 16:39:22 -0800 | [diff] [blame] | 81 | ret = wait_for_completion_timeout(killed, HZ * 5); |
| 82 | if (ret) |
| 83 | return ret; |
| 84 | |
| 85 | return msm_pm_wait_cpu_shutdown(cpu); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 86 | } |
| 87 | |
| 88 | /* |
| 89 | * platform-specific code to shutdown a CPU |
| 90 | * |
| 91 | * Called with IRQs disabled |
| 92 | */ |
| 93 | void platform_cpu_die(unsigned int cpu) |
| 94 | { |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 95 | if (unlikely(cpu != smp_processor_id())) { |
| 96 | pr_crit("%s: running on %u, should be %u\n", |
| 97 | __func__, smp_processor_id(), cpu); |
| 98 | BUG(); |
| 99 | } |
| 100 | complete(&__get_cpu_var(msm_hotplug_devices).cpu_killed); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 101 | /* |
| 102 | * we're ready for shutdown now, so do it |
| 103 | */ |
| 104 | cpu_enter_lowpower(); |
| 105 | platform_do_lowpower(cpu); |
| 106 | |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 107 | pr_notice("CPU%u: %s: normal wakeup\n", cpu, __func__); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 108 | cpu_leave_lowpower(); |
Jeff Ohlstein | 9f1890a | 2010-12-02 12:11:27 -0800 | [diff] [blame] | 109 | } |
| 110 | |
| 111 | int platform_cpu_disable(unsigned int cpu) |
| 112 | { |
| 113 | /* |
| 114 | * we don't allow CPU 0 to be shutdown (it is still too special |
| 115 | * e.g. clock tick interrupts) |
| 116 | */ |
| 117 | return cpu == 0 ? -EPERM : 0; |
| 118 | } |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 119 | |
/*
 * Layout of the word logged to the RTB on hotplug transitions:
 *   bits [3:0]  - number of the CPU being changed (CPU_OF)
 *   bits [19:4] - snapshot of the online-cpu mask (CPUSET_OF)
 * The 4-bit CPU field limits this encoding to 16 CPUs.
 */
#define CPU_SHIFT	0
#define CPU_MASK	0xF
#define CPU_OF(n)	(((n) & CPU_MASK) << CPU_SHIFT)
#define CPUSET_SHIFT	4
#define CPUSET_MASK	0xFFFF
#define CPUSET_OF(n)	(((n) & CPUSET_MASK) << CPUSET_SHIFT)
| 126 | |
| 127 | static int hotplug_rtb_callback(struct notifier_block *nfb, |
| 128 | unsigned long action, void *hcpu) |
| 129 | { |
| 130 | /* |
| 131 | * Bits [19:4] of the data are the online mask, lower 4 bits are the |
| 132 | * cpu number that is being changed. Additionally, changes to the |
| 133 | * online_mask that will be done by the current hotplug will be made |
| 134 | * even though they aren't necessarily in the online mask yet. |
| 135 | * |
| 136 | * XXX: This design is limited to supporting at most 16 cpus |
| 137 | */ |
| 138 | int this_cpumask = CPUSET_OF(1 << (int)hcpu); |
| 139 | int cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]); |
| 140 | int cpudata = CPU_OF((int)hcpu) | cpumask; |
| 141 | |
| 142 | switch (action & (~CPU_TASKS_FROZEN)) { |
| 143 | case CPU_STARTING: |
| 144 | uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask)); |
| 145 | break; |
| 146 | case CPU_DYING: |
| 147 | uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask)); |
| 148 | break; |
| 149 | default: |
| 150 | break; |
| 151 | } |
| 152 | |
| 153 | return NOTIFY_OK; |
| 154 | } |
/* Routes hotplug notifications into hotplug_rtb_callback() for RTB logging. */
static struct notifier_block hotplug_rtb_notifier = {
	.notifier_call = hotplug_rtb_callback,
};
| 158 | |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 159 | int msm_platform_secondary_init(unsigned int cpu) |
| 160 | { |
| 161 | int ret; |
| 162 | struct msm_hotplug_device *dev = &__get_cpu_var(msm_hotplug_devices); |
| 163 | |
| 164 | if (!dev->warm_boot) { |
| 165 | dev->warm_boot = 1; |
| 166 | init_completion(&dev->cpu_killed); |
| 167 | return 0; |
| 168 | } |
Pratik Patel | 17f3b82 | 2011-11-21 12:41:47 -0800 | [diff] [blame] | 169 | msm_jtag_restore_state(); |
Mahesh Sivasubramanian | d23add1 | 2011-11-18 14:30:11 -0700 | [diff] [blame] | 170 | #ifdef CONFIG_VFP |
| 171 | vfp_reinit(); |
| 172 | #endif |
| 173 | ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false); |
| 174 | |
| 175 | return ret; |
| 176 | } |
Jeff Ohlstein | c05defe | 2012-01-31 19:53:16 -0800 | [diff] [blame] | 177 | |
| 178 | static int __init init_hotplug_notifier(void) |
| 179 | { |
| 180 | return register_hotcpu_notifier(&hotplug_rtb_notifier); |
| 181 | } |
| 182 | early_initcall(init_hotplug_notifier); |