/*
 * Broadcom Brahma-B15 CPU read-ahead cache management functions
 *
 * Copyright (C) 2015-2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/of_address.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-b15-rac.h>

extern void v7_flush_kern_cache_all(void);

/* RAC register offsets, relative to the HIF_CPU_BIUCTRL register base */
#define RAC_CONFIG0_REG		(0x78)
#define RACENPREF_MASK		(0x3)
#define RACPREFINST_SHIFT	(0)
#define RACENINST_SHIFT		(2)
#define RACPREFDATA_SHIFT	(4)
#define RACENDATA_SHIFT		(6)
#define RAC_CPU_SHIFT		(8)
#define RACCFG_MASK		(0xff)
#define RAC_CONFIG1_REG		(0x7c)
/* Brahma-B15 is a quad-core only design */
#define B15_RAC_FLUSH_REG	(0x80)
/* Brahma-B53 is an octo-core design */
#define B53_RAC_FLUSH_REG	(0x84)
#define FLUSH_RAC		(1 << 0)

/* Bitmask to enable instruction and data prefetching with a 256-byte stride */
#define RAC_DATA_INST_EN_MASK	(1 << RACPREFINST_SHIFT | \
				 RACENPREF_MASK << RACENINST_SHIFT | \
				 1 << RACPREFDATA_SHIFT | \
				 RACENPREF_MASK << RACENDATA_SHIFT)
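/* For reference, with the shifts and masks defined above this works out to
 * (1 << 0) | (0x3 << 2) | (1 << 4) | (0x3 << 6) == 0xdd per CPU, i.e. the
 * instruction and data prefetch enable fields fully set.
 */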

#define RAC_ENABLED		0
/* Special state where we want to bypass the spinlock and call directly
 * into the v7 cache maintenance operations during suspend/resume
 */
#define RAC_SUSPENDED		1

static void __iomem *b15_rac_base;
static DEFINE_SPINLOCK(rac_lock);

static u32 rac_config0_reg;
static u32 rac_flush_offset;

/* Initialization flag to avoid checking for b15_rac_base, and to prevent
 * multi-platform kernels from crashing here as well.
 */
static unsigned long b15_rac_flags;

static inline u32 __b15_rac_disable(void)
{
	u32 val = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
	__raw_writel(0, b15_rac_base + RAC_CONFIG0_REG);
	dmb();
	return val;
}

static inline void __b15_rac_flush(void)
{
	u32 reg;

	__raw_writel(FLUSH_RAC, b15_rac_base + rac_flush_offset);
	do {
		/* This dmb() is required to force the Bus Interface Unit
		 * to clean outstanding writes, and forces an idle cycle
		 * to be inserted.
		 */
		dmb();
		reg = __raw_readl(b15_rac_base + rac_flush_offset);
	} while (reg & FLUSH_RAC);
}

static inline u32 b15_rac_disable_and_flush(void)
{
	u32 reg;

	reg = __b15_rac_disable();
	__b15_rac_flush();
	return reg;
}

static inline void __b15_rac_enable(u32 val)
{
	__raw_writel(val, b15_rac_base + RAC_CONFIG0_REG);
	/* dsb() is required here to be consistent with __flush_icache_all() */
	dsb();
}

#define BUILD_RAC_CACHE_OP(name, bar)				\
void b15_flush_##name(void)					\
{								\
	unsigned int do_flush;					\
	u32 val = 0;						\
								\
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) {		\
		v7_flush_##name();				\
		bar;						\
		return;						\
	}							\
								\
	spin_lock(&rac_lock);					\
	do_flush = test_bit(RAC_ENABLED, &b15_rac_flags);	\
	if (do_flush)						\
		val = b15_rac_disable_and_flush();		\
	v7_flush_##name();					\
	if (!do_flush)						\
		bar;						\
	else							\
		__b15_rac_enable(val);				\
	spin_unlock(&rac_lock);					\
}

#define nobarrier

/* The readahead cache present in the Brahma-B15 CPU is a special piece of
 * hardware sitting after the integrated L2 cache of the B15 CPU complex, whose
 * purpose is to prefetch instructions and/or data with a line size of either
 * 64 bytes or 256 bytes. The rationale is that the data-bus of the CPU
 * interface is optimized for 256-byte transactions, and enabling the readahead
 * cache provides a significant performance boost (typically twice the
 * performance of a memcpy benchmark application), so we want it enabled.
 *
 * The readahead cache is transparent for Modified Virtual Addresses
 * cache maintenance operations: ICIMVAU, DCIMVAC, DCCMVAC, DCCMVAU and
 * DCCIMVAC.
 *
 * It is however not transparent for the following cache maintenance
 * operations: DCISW, DCCSW, DCCISW, ICIALLUIS and ICIALLU, which is precisely
 * what we are patching here with BUILD_RAC_CACHE_OP.
 */
BUILD_RAC_CACHE_OP(kern_cache_all, nobarrier);
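
/* For readability, a sketch of what the invocation above roughly expands to
 * once the preprocessor has substituted "kern_cache_all" and the empty
 * "nobarrier" argument has been folded away (illustration only, not extra
 * code):
 *
 *	void b15_flush_kern_cache_all(void)
 *	{
 *		unsigned int do_flush;
 *		u32 val = 0;
 *
 *		if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) {
 *			v7_flush_kern_cache_all();
 *			return;
 *		}
 *
 *		spin_lock(&rac_lock);
 *		do_flush = test_bit(RAC_ENABLED, &b15_rac_flags);
 *		if (do_flush)
 *			val = b15_rac_disable_and_flush();
 *		v7_flush_kern_cache_all();
 *		if (do_flush)
 *			__b15_rac_enable(val);
 *		spin_unlock(&rac_lock);
 *	}
 */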

static void b15_rac_enable(void)
{
	unsigned int cpu;
	u32 enable = 0;

	for_each_possible_cpu(cpu)
		enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));

	b15_rac_disable_and_flush();
	__b15_rac_enable(enable);
}
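
/* On a fully populated quad-core system, for example, the loop above yields
 * 0xdd << 0 | 0xdd << 8 | 0xdd << 16 | 0xdd << 24 == 0xdddddddd, i.e. the
 * per-CPU RAC_DATA_INST_EN_MASK replicated into each CPU's field of
 * RAC_CONFIG0_REG.
 */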

static int b15_rac_reboot_notifier(struct notifier_block *nb,
				   unsigned long action,
				   void *data)
{
	/* During kexec, we are not yet migrated to the boot CPU, so we need to
	 * make sure we are SMP safe here. Once the RAC is disabled, flag it as
	 * suspended such that the hotplug notifier returns early.
	 */
	if (action == SYS_RESTART) {
		spin_lock(&rac_lock);
		b15_rac_disable_and_flush();
		clear_bit(RAC_ENABLED, &b15_rac_flags);
		set_bit(RAC_SUSPENDED, &b15_rac_flags);
		spin_unlock(&rac_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block b15_rac_reboot_nb = {
	.notifier_call	= b15_rac_reboot_notifier,
};

/* The CPU hotplug case is the most interesting one: we basically need to make
 * sure that the RAC is disabled for the entire system prior to having a CPU
 * die, in particular prior to this dying CPU having exited the coherency
 * domain.
 *
 * Once this CPU is marked dead, we can safely re-enable the RAC for the
 * remaining CPUs in the system which are still online.
 *
 * Offlining a CPU is the problematic case; onlining a CPU is not much of an
 * issue since the CPU and its cache hierarchy will start filling with the RAC
 * disabled, so L1 and L2 only.
 *
 * In these callbacks we should NOT have to verify any unsafe setting or
 * condition:
 *
 * b15_rac_base:
 *
 *   It is protected by the RAC_ENABLED flag, which is clear by default and
 *   only set once the initialization procedure is done, by which time
 *   b15_rac_base has been set.
 *
 * RAC_ENABLED:
 *
 *   There is a small timing window in b15_rac_init() between the
 *   cpuhp_setup_state_*() calls and setting RAC_ENABLED. However, there is no
 *   hotplug activity at that point of the Linux boot procedure.
 *
 * Since we have to disable the RAC for all cores, we keep the RAC on as long
 * as possible (disable it as late as possible) to gain the cache benefit.
 *
 * Thus, the dying/dead states are chosen here.
 *
 * We choose not to disable the RAC on a per-CPU basis here; if we did, we
 * would want to consider disabling it as early as possible to benefit the
 * other active CPUs.
 */
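
/* As an illustration only (assuming a CPU goes offline while the RAC is
 * enabled), the expected sequence is roughly:
 *
 *	CPUHP dying step, running on the dying CPU:
 *		b15_rac_dying_cpu()
 *			clear RAC_ENABLED
 *			rac_config0_reg = b15_rac_disable_and_flush()
 *	...the dying CPU then exits the coherency domain...
 *	CPUHP dead step, running on a surviving CPU:
 *		b15_rac_dead_cpu()
 *			__b15_rac_enable(rac_config0_reg)
 *			set RAC_ENABLED
 */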

/* Running on the dying CPU */
static int b15_rac_dying_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* Indicate that we are starting a hotplug procedure */
	__clear_bit(RAC_ENABLED, &b15_rac_flags);

	/* Disable the readahead cache and save its value to a global */
	rac_config0_reg = b15_rac_disable_and_flush();

	spin_unlock(&rac_lock);

	return 0;
}

/* Running on a non-dying CPU */
static int b15_rac_dead_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* Re-enable the RAC with the configuration saved by the dying CPU */
	__b15_rac_enable(rac_config0_reg);
	__set_bit(RAC_ENABLED, &b15_rac_flags);

	spin_unlock(&rac_lock);

	return 0;
}

static int b15_rac_suspend(void)
{
	/* Suspend the read-ahead cache operations, forcing our cache
	 * implementation to fall back to the regular ARMv7 calls.
	 *
	 * We are guaranteed to be running on the boot CPU at this point and
	 * with every other CPU quiesced, so setting RAC_SUSPENDED is not racy
	 * here.
	 */
	rac_config0_reg = b15_rac_disable_and_flush();
	set_bit(RAC_SUSPENDED, &b15_rac_flags);

	return 0;
}

static void b15_rac_resume(void)
{
	/* Coming out of an S3 suspend/resume cycle, the read-ahead cache
	 * register RAC_CONFIG0_REG will have been restored to its default
	 * value; make sure we re-enable it and clear the suspended flag. We
	 * are also guaranteed to run on the boot CPU, so this is not racy
	 * either.
	 */
	__b15_rac_enable(rac_config0_reg);
	clear_bit(RAC_SUSPENDED, &b15_rac_flags);
}

static struct syscore_ops b15_rac_syscore_ops = {
	.suspend	= b15_rac_suspend,
	.resume		= b15_rac_resume,
};

static int __init b15_rac_init(void)
{
	struct device_node *dn, *cpu_dn;
	int ret = 0, cpu;
	u32 reg, en_mask = 0;

	dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
	if (!dn)
		return -ENODEV;

	if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
		goto out;

	b15_rac_base = of_iomap(dn, 0);
	if (!b15_rac_base) {
		pr_err("failed to remap BIU control base\n");
		ret = -ENOMEM;
		goto out;
	}

	cpu_dn = of_get_cpu_node(0, NULL);
	if (!cpu_dn) {
		ret = -ENODEV;
		goto out;
	}

	if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
		rac_flush_offset = B15_RAC_FLUSH_REG;
	else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
		rac_flush_offset = B53_RAC_FLUSH_REG;
	else {
		pr_err("Unsupported CPU\n");
		of_node_put(cpu_dn);
		ret = -EINVAL;
		goto out;
	}
	of_node_put(cpu_dn);

	ret = register_reboot_notifier(&b15_rac_reboot_nb);
	if (ret) {
		pr_err("failed to register reboot notifier\n");
		iounmap(b15_rac_base);
		goto out;
	}

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
						"arm/cache-b15-rac:dead",
						NULL, b15_rac_dead_cpu);
		if (ret)
			goto out_unmap;

		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
						"arm/cache-b15-rac:dying",
						NULL, b15_rac_dying_cpu);
		if (ret)
			goto out_cpu_dead;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP))
		register_syscore_ops(&b15_rac_syscore_ops);

	spin_lock(&rac_lock);
	reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
	for_each_possible_cpu(cpu)
		en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
	WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");
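	/* With four possible CPUs, for example, en_mask above works out to
	 * 0x10 << 0 | 0x10 << 8 | 0x10 << 16 | 0x10 << 24 == 0x10101010,
	 * i.e. only the RACPREFDATA bit of each CPU's field is checked here.
	 */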

	b15_rac_enable();
	set_bit(RAC_ENABLED, &b15_rac_flags);
	spin_unlock(&rac_lock);

	pr_info("Broadcom Brahma-B15 readahead cache at: 0x%p\n",
		b15_rac_base + RAC_CONFIG0_REG);

	goto out;

out_cpu_dead:
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD);
out_unmap:
	unregister_reboot_notifier(&b15_rac_reboot_nb);
	iounmap(b15_rac_base);
out:
	of_node_put(dn);
	return ret;
}
arch_initcall(b15_rac_init);