/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

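/*
 * mcpm_set_entry_vector: set the address the given CPU in the given
 * cluster will jump to once released from reset, or clear it if ptr is
 * NULL.  The value is stored as a physical address and cleaned from the
 * cache so it is visible to the incoming CPU before its caches are on.
 */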
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? virt_to_phys(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

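/*
 * mcpm_set_early_poke: record an optional physical address and value for
 * the given CPU.  When the address is non-zero, the low-level entry code
 * writes the value to that address before waiting for its entry vector,
 * which lets platform code trigger a simple side effect (a power
 * controller poke, for instance) very early on the wake-up path.  Both
 * words are cleaned from the cache for the incoming CPU's benefit.
 */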
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val)
{
	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
	poke[0] = poke_phys_addr;
	poke[1] = poke_val;
	__sync_cache_range_w(poke, 2 * sizeof(*poke));
}

static const struct mcpm_platform_ops *platform_ops;

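/*
 * mcpm_platform_register: register the platform specific backend that
 * provides the low-level power methods.  Only one backend may be
 * registered; any further attempt fails with -EBUSY.
 */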
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
	if (platform_ops)
		return -EBUSY;
	platform_ops = ops;
	return 0;
}
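
/*
 * Registration is typically done from platform code via an early initcall.
 * A minimal sketch, with hypothetical names that are not part of this
 * file or of any particular backend:
 *
 *	static const struct mcpm_platform_ops hyp_pm_ops = {
 *		.power_up		= hyp_pm_power_up,
 *		.power_down		= hyp_pm_power_down,
 *		.wait_for_powerdown	= hyp_pm_wait_for_powerdown,
 *		.suspend		= hyp_pm_suspend,
 *		.powered_up		= hyp_pm_powered_up,
 *	};
 *
 *	static int __init hyp_pm_init(void)
 *	{
 *		return mcpm_platform_register(&hyp_pm_ops);
 *	}
 *	early_initcall(hyp_pm_init);
 */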

bool mcpm_is_available(void)
{
	return (platform_ops) ? true : false;
}

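/*
 * mcpm_cpu_power_up: make the given CPU in the given cluster runnable,
 * powering up the cluster first if needed.  May sleep, hence the
 * might_sleep() annotation.  Returns -EUNATCH when no backend is
 * registered, otherwise whatever the backend's power_up method returns.
 */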
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	if (!platform_ops)
		return -EUNATCH; /* try not to shadow power_up errors */
	might_sleep();
	return platform_ops->power_up(cpu, cluster);
}

typedef void (*phys_reset_t)(unsigned long);

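/*
 * mcpm_cpu_power_down: power down the calling CPU.  With a backend
 * registered this never returns: the CPU either powers off and later
 * re-enters the kernel through mcpm_entry_point, or, if a concurrent
 * power_up request defeated the shutdown, goes straight back through
 * the reset path as explained in the comment below.  Must be called
 * with IRQs disabled.
 */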
void mcpm_cpu_power_down(void)
{
	phys_reset_t phys_reset;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
		return;
	BUG_ON(!irqs_disabled());

	/*
	 * Do this before calling into the power_down method,
	 * as it might not always be safe to do afterwards.
	 */
	setup_mm_for_reboot();

	platform_ops->power_down();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
	 * power_down method might not be able to actually enter a
	 * powered down state with the WFI instruction if the power_up
	 * method has removed the required reset condition. The
	 * power_down method is then allowed to return. We must then
	 * perform a re-entry into the kernel as if the power_up method
	 * had just deasserted reset on the CPU.
	 *
	 * To simplify race handling, the platform specific implementation
	 * must accommodate the possibility of unordered calls to
	 * power_down and power_up by means of a usage count. Therefore,
	 * if a call to power_up is issued for a CPU that is not down,
	 * then the next call to power_down must not attempt a full
	 * shutdown but only do the minimum (normally disabling L1 cache
	 * and CPU coherency) and return just as if a concurrent power_up
	 * request had happened as described above.
	 */
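	/*
	 * A minimal sketch of how a backend might honour that requirement,
	 * assuming a hypothetical per-CPU use count protected by a lock
	 * (none of these names exist in this file):
	 *
	 *	use_count[cluster][cpu]--;
	 *	if (use_count[cluster][cpu] == 0) {
	 *		// really going down: flush caches, exit coherency,
	 *		// then WFI with the reset condition asserted
	 *	} else {
	 *		// a power_up raced with us: skip the full shutdown
	 *		// and simply return to the caller
	 *	}
	 */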

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));

	/* should never get here */
	BUG();
}

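/*
 * mcpm_wait_for_cpu_powerdown: wait for the given CPU to reach a safely
 * powered down state as determined by the backend.  Returns 0 on
 * success, -EUNATCH if the backend or its wait_for_powerdown method is
 * missing, or the backend's error code (also logged below) on failure.
 */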
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
	int ret;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
		return -EUNATCH;

	ret = platform_ops->wait_for_powerdown(cpu, cluster);
	if (ret)
		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
			__func__, cpu, cluster, ret);

	return ret;
}

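/*
 * mcpm_cpu_suspend: put the calling CPU into a suspended state, passing
 * the expected residency to the backend so it can choose a suitable low
 * power state.  Like mcpm_cpu_power_down(), this never returns when a
 * backend is registered: the CPU resumes through mcpm_entry_point.
 * Must be called with IRQs disabled.
 */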
void mcpm_cpu_suspend(u64 expected_residency)
{
	phys_reset_t phys_reset;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
		return;
	BUG_ON(!irqs_disabled());

	/* Very similar to mcpm_cpu_power_down() */
	setup_mm_for_reboot();
	platform_ops->suspend(expected_residency);
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}

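/*
 * mcpm_cpu_powered_up: called on a CPU that has just (re)entered the
 * kernel, giving the backend a chance to perform any housekeeping
 * required after the power transition.  Returns -EUNATCH if no backend
 * is registered.
 */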
int mcpm_cpu_powered_up(void)
{
	if (!platform_ops)
		return -EUNATCH;
	if (platform_ops->powered_up)
		platform_ops->powered_up();
	return 0;
}

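/*
 * State machine coordinating CPU and cluster teardown against concurrent
 * wake-ups without relying on locks, since the coherency needed for
 * ordinary locking is precisely what is being torn down.  Each CPU and
 * cluster state lives in its own cache writeback granule.  See
 * Documentation/arm/cluster-pm-race-avoidance.txt for the details.
 */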
struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}

int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_power_up_setup_phys;

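/*
 * mcpm_sync_init: set up the state machine above, marking every cluster
 * and CPU as down except for the currently running cluster and its
 * online CPUs, and record the physical address of the optional
 * power_up_setup routine used by the early boot/resume path.
 */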
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i)
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}