/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpu_pm.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
#include <asm/suspend.h>

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? virt_to_phys(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val)
{
	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
	poke[0] = poke_phys_addr;
	poke[1] = poke_val;
	__sync_cache_range_w(poke, 2 * sizeof(*poke));
}
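
/*
 * Note: the two helpers above only prime data consumed by the low-level
 * entry code (mcpm_entry_point, in mcpm_head.S).  Broadly speaking, a CPU
 * coming through that entry point is expected to perform any queued early
 * poke (writing poke_val to poke_phys_addr) very early, and to eventually
 * jump to the entry vector set for it once it may do so.  Purely
 * illustrative sketch of a backend queuing a "go" poke before releasing a
 * CPU; MY_MAILBOX_PHYS and the my_plat_* names are hypothetical and not
 * part of this file:
 *
 *	static int my_plat_power_up(unsigned int cpu, unsigned int cluster)
 *	{
 *		mcpm_set_early_poke(cpu, cluster, MY_MAILBOX_PHYS, 1);
 *		return my_plat_release_from_reset(cpu, cluster);
 *	}
 */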

static const struct mcpm_platform_ops *platform_ops;

int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
	if (platform_ops)
		return -EBUSY;
	platform_ops = ops;
	return 0;
}

bool mcpm_is_available(void)
{
	return (platform_ops) ? true : false;
}

int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	if (!platform_ops)
		return -EUNATCH; /* try not to shadow power_up errors */
	might_sleep();
	return platform_ops->power_up(cpu, cluster);
}
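
/*
 * Illustrative sketch (not part of this file): a typical caller of
 * mcpm_cpu_power_up() is the SMP bringup path, which first publishes the
 * address the woken CPU should jump to, then asks the backend to power it
 * up.  my_boot_secondary() is a hypothetical name; secondary_startup is
 * the kernel's usual secondary entry point.
 *
 *	static int my_boot_secondary(unsigned int cpu, struct task_struct *idle)
 *	{
 *		unsigned int mpidr = cpu_logical_map(cpu);
 *		unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *		unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *
 *		mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
 *		return mcpm_cpu_power_up(pcpu, pcluster);
 *	}
 */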

typedef void (*phys_reset_t)(unsigned long);

void mcpm_cpu_power_down(void)
{
	phys_reset_t phys_reset;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
		return;
	BUG_ON(!irqs_disabled());

	/*
	 * Do this before calling into the power_down method,
	 * as it might not always be safe to do afterwards.
	 */
	setup_mm_for_reboot();

	platform_ops->power_down();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
	 * power_down method might not be able to actually enter a
	 * powered down state with the WFI instruction if the power_up
	 * method has removed the required reset condition. The
	 * power_down method is then allowed to return. We must perform
	 * a re-entry in the kernel as if the power_up method had just
	 * deasserted reset on the CPU.
	 *
	 * To simplify race issues, the platform specific implementation
	 * must accommodate the possibility of unordered calls to
	 * power_down and power_up with a usage count. Therefore, if a
	 * call to power_up is issued for a CPU that is not down, then
	 * the next call to power_down must not attempt a full shutdown
	 * but only do the minimum (normally disabling L1 cache and CPU
	 * coherency) and return just as if a concurrent power_up request
	 * had happened as described above. (An illustrative sketch of
	 * such a usage count follows this function.)
	 */

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));

	/* should never get here */
	BUG();
}
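
/*
 * Illustrative sketch (not part of this file) of the usage-count
 * convention described above, as a platform backend might implement it.
 * All my_* names are hypothetical; real backends also have to handle the
 * last-man cluster teardown, omitted here for brevity.
 *
 *	static int my_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 *
 *	static int my_power_up(unsigned int cpu, unsigned int cluster)
 *	{
 *		my_pm_lock();
 *		if (!my_use_count[cluster][cpu]++)
 *			my_hw_release_cpu(cpu, cluster);
 *		my_pm_unlock();
 *		return 0;
 *	}
 *
 *	static void my_power_down(void)
 *	{
 *		unsigned int mpidr = read_cpuid_mpidr();
 *		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *		bool last_ref;
 *
 *		my_pm_lock();
 *		last_ref = !--my_use_count[cluster][cpu];
 *		if (last_ref)
 *			my_hw_gate_cpu_on_wfi(cpu, cluster);
 *		my_pm_unlock();
 *
 *		my_disable_cache_and_coherency();
 *		if (last_ref)
 *			wfi();
 *		// In either case, returning here makes mcpm_cpu_power_down()
 *		// re-enter the kernel through mcpm_entry_point.
 *	}
 */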

int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
	int ret;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
		return -EUNATCH;

	ret = platform_ops->wait_for_powerdown(cpu, cluster);
	if (ret)
		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
			__func__, cpu, cluster, ret);

	return ret;
}
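
/*
 * Illustrative note: this is typically called from the CPU hotplug "kill"
 * path on another CPU, after the dying CPU has called
 * mcpm_cpu_power_down(), to confirm the power-down really completed
 * before the hotplug operation is declared successful.  Hypothetical
 * caller (my_cpu_kill() is not part of this file; smp_ops cpu_kill
 * returns 1 on success):
 *
 *	static int my_cpu_kill(unsigned int l_cpu)
 *	{
 *		unsigned int mpidr = cpu_logical_map(l_cpu);
 *
 *		return !mcpm_wait_for_cpu_powerdown(MPIDR_AFFINITY_LEVEL(mpidr, 0),
 *						    MPIDR_AFFINITY_LEVEL(mpidr, 1));
 *	}
 */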

void mcpm_cpu_suspend(u64 expected_residency)
{
	phys_reset_t phys_reset;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
		return;
	BUG_ON(!irqs_disabled());

	/* Very similar to mcpm_cpu_power_down() */
	setup_mm_for_reboot();
	platform_ops->suspend(expected_residency);
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}

int mcpm_cpu_powered_up(void)
{
	if (!platform_ops)
		return -EUNATCH;
	if (platform_ops->powered_up)
		platform_ops->powered_up();
	return 0;
}
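
/*
 * Illustrative note: mcpm_cpu_powered_up() is meant to be called by a CPU
 * once it is fully back in the kernel (for instance from the secondary
 * bringup or resume path), so that the platform backend can drop any
 * wake-up latches and rebalance its usage counts.  A hypothetical
 * secondary_init hook (my_secondary_init() is not part of this file)
 * might simply do:
 *
 *	static void my_secondary_init(unsigned int cpu)
 *	{
 *		mcpm_cpu_powered_up();
 *	}
 */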

#ifdef CONFIG_ARM_CPU_SUSPEND

static int __init nocache_trampoline(unsigned long _arg)
{
	void (*cache_disable)(void) = (void *)_arg;
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	phys_reset_t phys_reset;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	setup_mm_for_reboot();

	__mcpm_cpu_going_down(cpu, cluster);
	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
	cache_disable();
	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	__mcpm_cpu_down(cpu, cluster);

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}

int __init mcpm_loopback(void (*cache_disable)(void))
{
	int ret;

	/*
	 * We're going to soft-restart the current CPU through the
	 * low-level MCPM code by leveraging the suspend/resume
	 * infrastructure. Let's play it safe by using cpu_pm_enter()
	 * in case the CPU init code path resets the VFP or similar.
	 */
	local_irq_disable();
	local_fiq_disable();
	ret = cpu_pm_enter();
	if (!ret) {
		ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
		cpu_pm_exit();
	}
	local_fiq_enable();
	local_irq_enable();
	if (ret)
		pr_err("%s returned %d\n", __func__, ret);
	return ret;
}

#endif
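
/*
 * Illustrative note: mcpm_loopback() is intended to be called once from
 * platform init code, on the boot CPU, so that the boot cluster also goes
 * through the low-level MCPM entry path at least once and receives the
 * same setup a freshly powered-up cluster would get.  The callback is
 * expected to disable the local cache and exit coherency, just as the
 * backend's power_down path would.  A hypothetical call site
 * (my_cache_disable() is not part of this file):
 *
 *	ret = mcpm_loopback(my_cache_disable);
 */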

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 * This must be called at the point of committing to teardown of a CPU.
 * The CPU cache (SCTLR.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 * cluster can be torn down without disrupting this CPU.
 * To avoid deadlocks, this must be called before a CPU is powered down.
 * The CPU cache (SCTLR.C bit) is expected to be off.
 * However L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete. CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}
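
/*
 * Illustrative sketch (not part of this file): how a platform backend's
 * power_down/suspend path is expected to drive the helpers above when it
 * turns out to be the last man of its cluster.  This mirrors the sequence
 * used by nocache_trampoline() earlier in this file; the my_* names and
 * the last_man flag are hypothetical.
 *
 *	__mcpm_cpu_going_down(cpu, cluster);
 *
 *	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
 *		my_disable_cache_and_coherency();
 *		my_cluster_teardown(cluster);		// e.g. L2 or CCI
 *		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
 *	} else {
 *		my_disable_cache_and_coherency();
 *	}
 *
 *	__mcpm_cpu_down(cpu, cluster);
 *	wfi();
 */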

int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_power_up_setup_phys;

int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i)
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}
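
/*
 * Illustrative sketch (not part of this file): the typical order of
 * operations in a platform's MCPM init code.  my_power_ops,
 * my_power_up_setup and my_cache_disable are hypothetical;
 * mcpm_smp_set_ops() is assumed to be the usual way the SMP operations
 * are switched over to the MCPM-based ones.
 *
 *	static int __init my_mcpm_init(void)
 *	{
 *		int ret;
 *
 *		ret = mcpm_platform_register(&my_power_ops);
 *		if (!ret)
 *			ret = mcpm_sync_init(my_power_up_setup);
 *		if (!ret)
 *			ret = mcpm_loopback(my_cache_disable);	// optional
 *		if (!ret)
 *			mcpm_smp_set_ops();
 *		return ret;
 *	}
 *	early_initcall(my_mcpm_init);
 */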