Abhilash Kesavan | ccf5511 | 2014-05-16 04:26:30 +0900 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. |
| 3 | * http://www.samsung.com |
| 4 | * |
| 5 | * arch/arm/mach-exynos/mcpm-exynos.c |
| 6 | * |
| 7 | * Based on arch/arm/mach-vexpress/dcscb.c |
| 8 | * |
| 9 | * This program is free software; you can redistribute it and/or modify |
| 10 | * it under the terms of the GNU General Public License version 2 as |
| 11 | * published by the Free Software Foundation. |
| 12 | */ |
| 13 | |
| 14 | #include <linux/arm-cci.h> |
| 15 | #include <linux/delay.h> |
| 16 | #include <linux/io.h> |
| 17 | #include <linux/of_address.h> |
| 18 | |
| 19 | #include <asm/cputype.h> |
| 20 | #include <asm/cp15.h> |
| 21 | #include <asm/mcpm.h> |
| 22 | |
| 23 | #include "regs-pmu.h" |
| 24 | #include "common.h" |
| 25 | |
/* Exynos5420/5800 big.LITTLE topology: 2 clusters of 4 CPUs each. */
#define EXYNOS5420_CPUS_PER_CLUSTER 4
#define EXYNOS5420_NR_CLUSTERS 2

/*
 * The common v7_exit_coherency_flush API could not be used because of the
 * Erratum 799270 workaround. This macro is the same as the common one (in
 * arch/arm/include/asm/cacheflush.h) except for the erratum handling.
 *
 * Sequence: clear SCTLR.C to disable the data cache, flush the D-cache to
 * the requested @level (all / louis), then clear ACTLR bit 6 to take this
 * CPU out of SMP coherency.  The dummy load from a device register
 * (S5P_INFORM0), masked to zero and OR-ed into the ACTLR value, is the
 * Erratum 799270 workaround; it does not change the value written back.
 * Must run with interrupts masked; after this the CPU is non-coherent.
 */
#define exynos_v7_exit_coherency_flush(level) \
	asm volatile( \
	"stmfd sp!, {fp, ip}\n\t"\
	"mrc p15, 0, r0, c1, c0, 0 @ get SCTLR\n\t" \
	"bic r0, r0, #"__stringify(CR_C)"\n\t" \
	"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \
	"isb\n\t"\
	"bl v7_flush_dcache_"__stringify(level)"\n\t" \
	"clrex\n\t"\
	"mrc p15, 0, r0, c1, c0, 1 @ get ACTLR\n\t" \
	"bic r0, r0, #(1 << 6) @ disable local coherency\n\t" \
	/* Dummy Load of a device register to avoid Erratum 799270 */ \
	"ldr r4, [%0]\n\t" \
	"and r4, r4, #0\n\t" \
	"orr r0, r0, r4\n\t" \
	"mcr p15, 0, r0, c1, c0, 1 @ set ACTLR\n\t" \
	"isb\n\t" \
	"dsb\n\t" \
	"ldmfd sp!, {fp, ip}" \
	: \
	: "Ir" (S5P_INFORM0) \
	: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
	  "r9", "r10", "lr", "memory")
| 57 | |
| 58 | /* |
| 59 | * We can't use regular spinlocks. In the switcher case, it is possible |
| 60 | * for an outbound CPU to call power_down() after its inbound counterpart |
| 61 | * is already live using the same logical CPU number which trips lockdep |
| 62 | * debugging. |
| 63 | */ |
| 64 | static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
| 65 | static int |
| 66 | cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS]; |
| 67 | |
| 68 | #define exynos_cluster_usecnt(cluster) \ |
| 69 | (cpu_use_count[0][cluster] + \ |
| 70 | cpu_use_count[1][cluster] + \ |
| 71 | cpu_use_count[2][cluster] + \ |
| 72 | cpu_use_count[3][cluster]) |
| 73 | |
| 74 | #define exynos_cluster_unused(cluster) !exynos_cluster_usecnt(cluster) |
| 75 | |
| 76 | static int exynos_cluster_power_control(unsigned int cluster, int enable) |
| 77 | { |
| 78 | unsigned int tries = 100; |
| 79 | unsigned int val; |
| 80 | |
| 81 | if (enable) { |
| 82 | exynos_cluster_power_up(cluster); |
| 83 | val = S5P_CORE_LOCAL_PWR_EN; |
| 84 | } else { |
| 85 | exynos_cluster_power_down(cluster); |
| 86 | val = 0; |
| 87 | } |
| 88 | |
| 89 | /* Wait until cluster power control is applied */ |
| 90 | while (tries--) { |
| 91 | if (exynos_cluster_power_state(cluster) == val) |
| 92 | return 0; |
| 93 | |
| 94 | cpu_relax(); |
| 95 | } |
| 96 | pr_debug("timed out waiting for cluster %u to power %s\n", cluster, |
| 97 | enable ? "on" : "off"); |
| 98 | |
| 99 | return -ETIMEDOUT; |
| 100 | } |
| 101 | |
/*
 * MCPM power_up callback: raise the use count for @cpu in @cluster and,
 * when this is the first user, power the core (and the cluster first,
 * if it was completely down) via the PMU.
 *
 * Called with IRQs enabled; returns 0 on success, -EINVAL for an
 * out-of-range cpu/cluster, or the error from
 * exynos_cluster_power_control() if the cluster failed to power on.
 */
static int exynos_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
	int err = 0;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
		cluster >= EXYNOS5420_NR_CLUSTERS)
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&exynos_mcpm_lock);

	cpu_use_count[cpu][cluster]++;
	if (cpu_use_count[cpu][cluster] == 1) {
		/*
		 * Our increment took the cluster-wide count from 0 to 1,
		 * so the whole cluster (L2/COMMON) must be powered first.
		 */
		bool was_cluster_down =
			(exynos_cluster_usecnt(cluster) == 1);

		/*
		 * Turn on the cluster (L2/COMMON) and then power on the
		 * cores.
		 */
		if (was_cluster_down)
			err = exynos_cluster_power_control(cluster, 1);

		if (!err)
			exynos_cpu_power_up(cpunr);
		else
			/* Roll back the cluster request on failure. */
			exynos_cluster_power_control(cluster, 0);
	} else if (cpu_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&exynos_mcpm_lock);
	local_irq_enable();

	return err;
}
| 152 | |
| 153 | /* |
| 154 | * NOTE: This function requires the stack data to be visible through power down |
| 155 | * and can only be executed on processors like A15 and A7 that hit the cache |
| 156 | * with the C bit clear in the SCTLR register. |
| 157 | */ |
| 158 | static void exynos_power_down(void) |
| 159 | { |
| 160 | unsigned int mpidr, cpu, cluster; |
| 161 | bool last_man = false, skip_wfi = false; |
| 162 | unsigned int cpunr; |
| 163 | |
| 164 | mpidr = read_cpuid_mpidr(); |
| 165 | cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
| 166 | cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
| 167 | cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); |
| 168 | |
| 169 | pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); |
| 170 | BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER || |
| 171 | cluster >= EXYNOS5420_NR_CLUSTERS); |
| 172 | |
| 173 | __mcpm_cpu_going_down(cpu, cluster); |
| 174 | |
| 175 | arch_spin_lock(&exynos_mcpm_lock); |
| 176 | BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP); |
| 177 | cpu_use_count[cpu][cluster]--; |
| 178 | if (cpu_use_count[cpu][cluster] == 0) { |
| 179 | exynos_cpu_power_down(cpunr); |
| 180 | |
| 181 | if (exynos_cluster_unused(cluster)) |
| 182 | /* TODO: Turn off the cluster here to save power. */ |
| 183 | last_man = true; |
| 184 | } else if (cpu_use_count[cpu][cluster] == 1) { |
| 185 | /* |
| 186 | * A power_up request went ahead of us. |
| 187 | * Even if we do not want to shut this CPU down, |
| 188 | * the caller expects a certain state as if the WFI |
| 189 | * was aborted. So let's continue with cache cleaning. |
| 190 | */ |
| 191 | skip_wfi = true; |
| 192 | } else { |
| 193 | BUG(); |
| 194 | } |
| 195 | |
| 196 | if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { |
| 197 | arch_spin_unlock(&exynos_mcpm_lock); |
| 198 | |
| 199 | if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { |
| 200 | /* |
| 201 | * On the Cortex-A15 we need to disable |
| 202 | * L2 prefetching before flushing the cache. |
| 203 | */ |
| 204 | asm volatile( |
| 205 | "mcr p15, 1, %0, c15, c0, 3\n\t" |
| 206 | "isb\n\t" |
| 207 | "dsb" |
| 208 | : : "r" (0x400)); |
| 209 | } |
| 210 | |
| 211 | /* Flush all cache levels for this cluster. */ |
| 212 | exynos_v7_exit_coherency_flush(all); |
| 213 | |
| 214 | /* |
| 215 | * Disable cluster-level coherency by masking |
| 216 | * incoming snoops and DVM messages: |
| 217 | */ |
| 218 | cci_disable_port_by_cpu(mpidr); |
| 219 | |
| 220 | __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); |
| 221 | } else { |
| 222 | arch_spin_unlock(&exynos_mcpm_lock); |
| 223 | |
| 224 | /* Disable and flush the local CPU cache. */ |
| 225 | exynos_v7_exit_coherency_flush(louis); |
| 226 | } |
| 227 | |
| 228 | __mcpm_cpu_down(cpu, cluster); |
| 229 | |
| 230 | /* Now we are prepared for power-down, do it: */ |
| 231 | if (!skip_wfi) |
| 232 | wfi(); |
| 233 | |
| 234 | /* Not dead at this point? Let our caller cope. */ |
| 235 | } |
| 236 | |
Kukjin Kim | 7c5688e | 2014-05-28 00:04:34 +0900 | [diff] [blame] | 237 | static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster) |
Abhilash Kesavan | ccf5511 | 2014-05-16 04:26:30 +0900 | [diff] [blame] | 238 | { |
| 239 | unsigned int tries = 100; |
| 240 | unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); |
| 241 | |
| 242 | pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); |
| 243 | BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER || |
| 244 | cluster >= EXYNOS5420_NR_CLUSTERS); |
| 245 | |
| 246 | /* Wait for the core state to be OFF */ |
| 247 | while (tries--) { |
| 248 | if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) { |
| 249 | if ((exynos_cpu_power_state(cpunr) == 0)) |
| 250 | return 0; /* success: the CPU is halted */ |
| 251 | } |
| 252 | |
| 253 | /* Otherwise, wait and retry: */ |
| 254 | msleep(1); |
| 255 | } |
| 256 | |
| 257 | return -ETIMEDOUT; /* timeout */ |
| 258 | } |
| 259 | |
Chander Kashyap | fc2cac4 | 2014-07-05 06:24:35 +0900 | [diff] [blame] | 260 | static void exynos_powered_up(void) |
| 261 | { |
| 262 | unsigned int mpidr, cpu, cluster; |
| 263 | |
| 264 | mpidr = read_cpuid_mpidr(); |
| 265 | cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
| 266 | cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
| 267 | |
| 268 | arch_spin_lock(&exynos_mcpm_lock); |
| 269 | if (cpu_use_count[cpu][cluster] == 0) |
| 270 | cpu_use_count[cpu][cluster] = 1; |
| 271 | arch_spin_unlock(&exynos_mcpm_lock); |
| 272 | } |
| 273 | |
| 274 | static void exynos_suspend(u64 residency) |
| 275 | { |
| 276 | unsigned int mpidr, cpunr; |
| 277 | |
| 278 | exynos_power_down(); |
| 279 | |
| 280 | /* |
| 281 | * Execution reaches here only if cpu did not power down. |
| 282 | * Hence roll back the changes done in exynos_power_down function. |
| 283 | * |
| 284 | * CAUTION: "This function requires the stack data to be visible through |
| 285 | * power down and can only be executed on processors like A15 and A7 |
| 286 | * that hit the cache with the C bit clear in the SCTLR register." |
| 287 | */ |
| 288 | mpidr = read_cpuid_mpidr(); |
| 289 | cpunr = exynos_pmu_cpunr(mpidr); |
| 290 | |
| 291 | exynos_cpu_power_up(cpunr); |
| 292 | } |
| 293 | |
/* MCPM platform backend operations for Exynos5420/5800. */
static const struct mcpm_platform_ops exynos_power_ops = {
	.power_up		= exynos_power_up,
	.power_down		= exynos_power_down,
	.wait_for_powerdown	= exynos_wait_for_powerdown,
	.suspend		= exynos_suspend,
	.powered_up		= exynos_powered_up,
};
| 301 | |
| 302 | static void __init exynos_mcpm_usage_count_init(void) |
| 303 | { |
| 304 | unsigned int mpidr, cpu, cluster; |
| 305 | |
| 306 | mpidr = read_cpuid_mpidr(); |
| 307 | cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
| 308 | cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
| 309 | |
| 310 | pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); |
| 311 | BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER || |
| 312 | cluster >= EXYNOS5420_NR_CLUSTERS); |
| 313 | |
| 314 | cpu_use_count[cpu][cluster] = 1; |
| 315 | } |
| 316 | |
| 317 | /* |
| 318 | * Enable cluster-level coherency, in preparation for turning on the MMU. |
| 319 | */ |
| 320 | static void __naked exynos_pm_power_up_setup(unsigned int affinity_level) |
| 321 | { |
| 322 | asm volatile ("\n" |
| 323 | "cmp r0, #1\n" |
| 324 | "bxne lr\n" |
| 325 | "b cci_enable_port_for_self"); |
| 326 | } |
| 327 | |
/* SoCs for which this MCPM backend is applicable. */
static const struct of_device_id exynos_dt_mcpm_match[] = {
	{ .compatible = "samsung,exynos5420" },
	{ .compatible = "samsung,exynos5800" },
	{},
};
| 333 | |
/*
 * Probe for a supported SoC and a working CCI, map the non-secure
 * iRAM, register the MCPM backend and patch the secondary-CPU boot
 * vector in iRAM to jump into mcpm_entry_point().
 *
 * Returns 0 on success; -ENODEV when the SoC/CCI/sysram is absent,
 * -ENOMEM if the iRAM cannot be mapped, or the error from the MCPM
 * core registration.
 */
static int __init exynos_mcpm_init(void)
{
	struct device_node *node;
	void __iomem *ns_sram_base_addr;
	int ret;

	node = of_find_matching_node(NULL, exynos_dt_mcpm_match);
	if (!node)
		return -ENODEV;
	of_node_put(node);

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL,
			"samsung,exynos4210-sysram-ns");
	if (!node)
		return -ENODEV;

	ns_sram_base_addr = of_iomap(node, 0);
	of_node_put(node);
	if (!ns_sram_base_addr) {
		pr_err("failed to map non-secure iRAM base address\n");
		return -ENOMEM;
	}

	/*
	 * To increase the stability of KFC reset we need to program
	 * the PMU SPARE3 register
	 */
	__raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);

	exynos_mcpm_usage_count_init();

	ret = mcpm_platform_register(&exynos_power_ops);
	if (!ret)
		ret = mcpm_sync_init(exynos_pm_power_up_setup);
	if (ret) {
		/* Registration failed: release the iRAM mapping. */
		iounmap(ns_sram_base_addr);
		return ret;
	}

	mcpm_smp_set_ops();

	pr_info("Exynos MCPM support installed\n");

	/*
	 * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr
	 * as part of secondary_cpu_start(). Let's redirect it to the
	 * mcpm_entry_point().
	 *
	 * The two opcodes below are a position-independent trampoline:
	 * load the address stored at offset 8 and branch to it.
	 */
	__raw_writel(0xe59f0000, ns_sram_base_addr);	 /* ldr r0, [pc, #0] */
	__raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */
	__raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);

	iounmap(ns_sram_base_addr);

	return ret;
}

early_initcall(exynos_mcpm_init);