/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by: Nicolas Pitre, March 2012
 * Copyright: (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
	unsigned int id;
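	/* Read MPIDR (CP15 c0, c0, 5) and keep only the affinity fields. */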
	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_arg)
{
	unsigned ib_mpidr, ib_cpu, ib_cluster;
	long volatile handshake, **handshake_ptr = _arg;

	pr_debug("%s\n", __func__);

	ib_mpidr = cpu_logical_map(smp_processor_id());
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	/* Advertise our handshake location */
	if (handshake_ptr) {
		handshake = 0;
		*handshake_ptr = &handshake;
	} else
		handshake = -1;

	/*
	 * Our state has been saved at this point. Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
	sev();

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend(). It is therefore important to
	 * be very careful not to make any change the other guy is not
	 * expecting. This is why we need stack isolation.
	 *
	 * Fancy undercover tasks could be performed here. For now
	 * we have none.
	 */

	/*
	 * Let's wait until our inbound is alive.
	 */
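	/* WFE may wake up spuriously, so keep re-checking the flag. */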
	while (!handshake) {
		wfe();
		smp_mb();
	}

	/* Let's put ourselves down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}

/*
 * Stack isolation. To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	void *stack = current_thread_info() + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
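	/* The stack grows downward, so point sp at the top (high end) of our slot. */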
	stack += clusterid * STACK_SIZE + STACK_SIZE;
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}

/*
 * Generic switcher interface
 */

static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
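/* For each logical CPU, the logical CPU it is paired with, or -1 if unpaired. */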
static int bL_switcher_cpu_pairing[NR_CPUS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, this_cpu, that_cpu;
	unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
	struct completion inbound_alive;
	long volatile *handshake_ptr;
	int ipi_nr, ret;

	this_cpu = smp_processor_id();
	ob_mpidr = read_mpidr();
	ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
	ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
	BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

	if (new_cluster_id == ob_cluster)
		return 0;

	that_cpu = bL_switcher_cpu_pairing[this_cpu];
	ib_mpidr = cpu_logical_map(that_cpu);
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
		 this_cpu, ob_mpidr, ib_mpidr);

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
	mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

	/* Install our "inbound alive" notifier. */
	init_completion(&inbound_alive);
	ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
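	/* Target the outbound CPU via the SGIR CPUTargetList field (bits [23:16]). */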
	ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
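	/*
	 * Have the inbound write this value to the GIC SGIR as soon as it
	 * enters the early MCPM entry code, signalling that it is alive.
	 */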
	mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * Raise an SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in bL_power_down().
	 */
	gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);

	/*
	 * Wait for the inbound to come up. This allows for other
	 * tasks to be scheduled in the meantime.
	 */
	wait_for_completion(&inbound_alive);
	mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);

	/*
	 * From this point we are entering the switch critical zone
	 * and can't take any interrupts anymore.
	 */
	local_irq_disable();
	local_fiq_disable();
	trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);

	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

	tick_suspend_local();

	ret = cpu_pm_enter();

	/* we cannot tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/* Swap the physical CPUs in the logical map for this logical CPU. */
	cpu_logical_map(this_cpu) = ib_mpidr;
	cpu_logical_map(that_cpu) = ob_mpidr;

	/* Let's do the actual CPU switch. */
	ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
	if (ret > 0)
		panic("%s: cpu_suspend() returned %d\n", __func__, ret);

	/* We are executing on the inbound CPU at this point */
	mpidr = read_mpidr();
	pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
	BUG_ON(mpidr != ib_mpidr);

	mcpm_cpu_powered_up();

	ret = cpu_pm_exit();

	tick_resume_local();

	trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
	local_fiq_enable();
	local_irq_enable();

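	/*
	 * Tell the outbound we are up, and make sure the store is visible
	 * before waking it from WFE.
	 */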
	*handshake_ptr = 1;
	dsb_sev();

	if (ret)
		pr_err("%s exiting with error %d\n", __func__, ret);
	return ret;
}

struct bL_thread {
	spinlock_t lock;
	struct task_struct *task;
	wait_queue_head_t wq;
	int wanted_cluster;
	struct completion started;
	bL_switch_completion_handler completer;
	void *completer_cookie;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
	struct bL_thread *t = arg;
	struct sched_param param = { .sched_priority = 1 };
	int cluster;
	bL_switch_completion_handler completer;
	void *completer_cookie;

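	/* Lowest RT priority: switch requests must preempt normal tasks. */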
	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	complete(&t->started);

	do {
		if (signal_pending(current))
			flush_signals(current);
		wait_event_interruptible(t->wq,
				t->wanted_cluster != -1 ||
				kthread_should_stop());

		spin_lock(&t->lock);
		cluster = t->wanted_cluster;
		completer = t->completer;
		completer_cookie = t->completer_cookie;
		t->wanted_cluster = -1;
		t->completer = NULL;
		spin_unlock(&t->lock);

		if (cluster != -1) {
			bL_switch_to(cluster);

			if (completer)
				completer(completer_cookie);
		}
	} while (!kthread_should_stop());

	return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
	struct task_struct *task;

	task = kthread_create_on_node(bL_switcher_thread, arg,
				      cpu_to_node(cpu), "kswitcher_%d", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	} else
		pr_err("%s failed for CPU %d\n", __func__, cpu);
	return task;
}

/*
 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 *      with completion notification via a callback
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 * @completer: switch completion callback. If non-NULL,
 *	@completer(@completer_cookie) will be called on completion of
 *	the switch, in non-atomic context.
 * @completer_cookie: opaque context argument for @completer.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread. This function may or may not return
 * before the switch has occurred.
 *
 * If a @completer callback function is supplied, it will be called when
 * the switch is complete. This can be used to determine asynchronously
 * when the switch is complete, regardless of when bL_switch_request()
 * returns. When @completer is supplied, no new switch request is permitted
 * for the affected CPU until after the switch is complete, and @completer
 * has returned.
 */
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
			 bL_switch_completion_handler completer,
			 void *completer_cookie)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}

	t = &bL_threads[cpu];

	if (IS_ERR(t->task))
		return PTR_ERR(t->task);
	if (!t->task)
		return -ESRCH;

	spin_lock(&t->lock);
	if (t->completer) {
		spin_unlock(&t->lock);
		return -EBUSY;
	}
	t->completer = completer;
	t->completer_cookie = completer_cookie;
	t->wanted_cluster = new_cluster_id;
	spin_unlock(&t->lock);
	wake_up(&t->wq);
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
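
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * ask logical CPU 0 to switch to cluster 1 and wait for the result.
 *
 *	static void switch_done(void *cookie)
 *	{
 *		complete(cookie);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	if (!bL_switch_request_cb(0, 1, switch_done, &done))
 *		wait_for_completion(&done);
 */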

/*
 * Activation and configuration code.
 */

static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);

static int bL_activation_notify(unsigned long val)
{
	int ret;

	ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
	if (ret & NOTIFY_STOP_MASK)
		pr_err("%s: notifier chain failed with status 0x%x\n",
			__func__, ret);
	return notifier_to_errno(ret);
}

static void bL_switcher_restore_cpus(void)
{
	int i;

	for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
		struct device *cpu_dev = get_cpu_device(i);
		int ret = device_online(cpu_dev);
		if (ret)
			dev_err(cpu_dev, "switcher: unable to restore CPU\n");
	}
}

static int bL_switcher_halve_cpus(void)
{
	int i, j, cluster_0, gic_id, ret;
	unsigned int cpu, cluster, mask;
	cpumask_t available_cpus;

	/* First pass to validate what we have */
	mask = 0;
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster >= 2) {
			pr_err("%s: only dual cluster systems are supported\n", __func__);
			return -EINVAL;
		}
		if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
			return -EINVAL;
		mask |= (1 << cluster);
	}
	if (mask != 3) {
		pr_err("%s: no CPU pairing possible\n", __func__);
		return -EINVAL;
	}

	/*
	 * Now let's do the pairing. We match each CPU with another CPU
	 * from a different cluster. To get a uniform scheduling behavior
	 * without fiddling with CPU topology and compute capacity data,
	 * we'll use logical CPUs initially belonging to the same cluster.
	 */
	memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
	cpumask_copy(&available_cpus, cpu_online_mask);
	cluster_0 = -1;
	for_each_cpu(i, &available_cpus) {
		int match = -1;
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster_0 == -1)
			cluster_0 = cluster;
		if (cluster != cluster_0)
			continue;
		cpumask_clear_cpu(i, &available_cpus);
		for_each_cpu(j, &available_cpus) {
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
			/*
			 * Let's remember the last match to create "odd"
			 * pairings on purpose in order for other code not
			 * to assume any relation between physical and
			 * logical CPU numbers.
			 */
			if (cluster != cluster_0)
				match = j;
		}
		if (match != -1) {
			bL_switcher_cpu_pairing[i] = match;
			cpumask_clear_cpu(match, &available_cpus);
			pr_info("CPU%d paired with CPU%d\n", i, match);
		}
	}

	/*
	 * Now we disable the unwanted CPUs i.e. everything that has no
	 * pairing information (that includes the pairing counterparts).
	 */
	cpumask_clear(&bL_switcher_removed_logical_cpus);
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

		/* Let's take note of the GIC ID for this CPU */
		gic_id = gic_get_cpu_id(i);
		if (gic_id < 0) {
			pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
			bL_switcher_restore_cpus();
			return -EINVAL;
		}
		bL_gic_id[cpu][cluster] = gic_id;
		pr_info("GIC ID for CPU %u cluster %u is %u\n",
			cpu, cluster, gic_id);

		if (bL_switcher_cpu_pairing[i] != -1) {
			bL_switcher_cpu_original_cluster[i] = cluster;
			continue;
		}

		ret = device_offline(get_cpu_device(i));
		if (ret) {
			bL_switcher_restore_cpus();
			return ret;
		}
		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
	}

	return 0;
}

/* Determine the logical CPU a given physical CPU is grouped on. */
int bL_switcher_get_logical_index(u32 mpidr)
{
	int cpu;

	if (!bL_switcher_active)
		return -EUNATCH;

	mpidr &= MPIDR_HWID_BITMASK;
	for_each_online_cpu(cpu) {
		int pairing = bL_switcher_cpu_pairing[cpu];
		if (pairing == -1)
			continue;
		if ((mpidr == cpu_logical_map(cpu)) ||
		    (mpidr == cpu_logical_map(pairing)))
			return cpu;
	}
	return -EINVAL;
}

static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
	trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
}

int bL_switcher_trace_trigger(void)
{
	int ret;

	preempt_disable();

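	/* smp_call_function() skips the calling CPU, so trace it here first. */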
	bL_switcher_trace_trigger_cpu(NULL);
	ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);

	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);

static int bL_switcher_enable(void)
{
	int cpu, ret;

	mutex_lock(&bL_switcher_activation_lock);
	lock_device_hotplug();
	if (bL_switcher_active) {
		unlock_device_hotplug();
		mutex_unlock(&bL_switcher_activation_lock);
		return 0;
	}

	pr_info("big.LITTLE switcher initializing\n");

	ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
	if (ret)
		goto error;

	ret = bL_switcher_halve_cpus();
	if (ret)
		goto error;

	bL_switcher_trace_trigger();

	for_each_online_cpu(cpu) {
		struct bL_thread *t = &bL_threads[cpu];
		spin_lock_init(&t->lock);
		init_waitqueue_head(&t->wq);
		init_completion(&t->started);
		t->wanted_cluster = -1;
		t->task = bL_switcher_thread_create(cpu, t);
	}

	bL_switcher_active = 1;
	bL_activation_notify(BL_NOTIFY_POST_ENABLE);
	pr_info("big.LITTLE switcher initialized\n");
	goto out;

error:
	pr_warn("big.LITTLE switcher initialization failed\n");
	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	unlock_device_hotplug();
	mutex_unlock(&bL_switcher_activation_lock);
	return ret;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
	unsigned int cpu, cluster;
	struct bL_thread *t;
	struct task_struct *task;

	mutex_lock(&bL_switcher_activation_lock);
	lock_device_hotplug();

	if (!bL_switcher_active)
		goto out;

	if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
		bL_activation_notify(BL_NOTIFY_POST_ENABLE);
		goto out;
	}

	bL_switcher_active = 0;

	/*
	 * To deactivate the switcher, we must shut down the switcher
	 * threads to prevent any other requests from being accepted.
	 * Then, if the final cluster for a given logical CPU is not the
	 * same as the original one, we'll recreate a switcher thread
	 * just for the purpose of switching the CPU back without any
	 * possibility for interference from external requests.
	 */
	for_each_online_cpu(cpu) {
		t = &bL_threads[cpu];
		task = t->task;
		t->task = NULL;
		if (!task || IS_ERR(task))
			continue;
		kthread_stop(task);
		/* no more switch may happen on this CPU at this point */
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
		if (cluster == bL_switcher_cpu_original_cluster[cpu])
			continue;
		init_completion(&t->started);
		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
		task = bL_switcher_thread_create(cpu, t);
		if (!IS_ERR(task)) {
			wait_for_completion(&t->started);
			kthread_stop(task);
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
			if (cluster == bL_switcher_cpu_original_cluster[cpu])
				continue;
		}
		/* If execution gets here, we're in trouble. */
		pr_crit("%s: unable to restore original cluster for CPU %d\n",
			__func__, cpu);
		pr_crit("%s: CPU %d can't be restored\n",
			__func__, bL_switcher_cpu_pairing[cpu]);
		cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
				  &bL_switcher_removed_logical_cpus);
	}

	bL_switcher_restore_cpus();
	bL_switcher_trace_trigger();

	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	unlock_device_hotplug();
	mutex_unlock(&bL_switcher_activation_lock);
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret;

	switch (buf[0]) {
	case '0':
		bL_switcher_disable();
		ret = 0;
		break;
	case '1':
		ret = bL_switcher_enable();
		break;
	default:
		ret = -EINVAL;
	}

	return (ret >= 0) ? count : ret;
}

static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = bL_switcher_trace_trigger();

	return ret ? ret : count;
}

static struct kobj_attribute bL_switcher_active_attr =
	__ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct kobj_attribute bL_switcher_trace_trigger_attr =
	__ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);

static struct attribute *bL_switcher_attrs[] = {
	&bL_switcher_active_attr.attr,
	&bL_switcher_trace_trigger_attr.attr,
	NULL,
};

static struct attribute_group bL_switcher_attr_group = {
	.attrs = bL_switcher_attrs,
};

static struct kobject *bL_switcher_kobj;

static int __init bL_switcher_sysfs_init(void)
{
	int ret;

	bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
	if (!bL_switcher_kobj)
		return -ENOMEM;
	ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
	if (ret)
		kobject_put(bL_switcher_kobj);
	return ret;
}

#endif /* CONFIG_SYSFS */

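/*
 * bL_switcher_get_enabled - sample the switcher state for a critical section
 *
 * Returns with bL_switcher_activation_lock held: callers must pair this
 * with a call to bL_switcher_put_enabled() to release the lock.
 */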
bool bL_switcher_get_enabled(void)
{
	mutex_lock(&bL_switcher_activation_lock);

	return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
	mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);

/*
 * Veto any CPU hotplug operation on those CPUs we've removed
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_cpu_pre(unsigned int cpu)
{
	int pairing;

	if (!bL_switcher_active)
		return 0;

	pairing = bL_switcher_cpu_pairing[cpu];

	if (pairing == -1)
		return -EINVAL;
	return 0;
}

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);

static int __init bL_switcher_init(void)
{
	int ret;

	if (!mcpm_is_available())
		return -ENODEV;

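	/* Veto both onlining and offlining of CPUs lacking pairing information. */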
	cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare",
				  bL_switcher_cpu_pre, NULL);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown",
					NULL, bL_switcher_cpu_pre);
	if (ret < 0) {
		cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE);
		pr_err("bL_switcher: Failed to allocate a hotplug state\n");
		return ret;
	}
	if (!no_bL_switcher) {
		ret = bL_switcher_enable();
		if (ret)
			return ret;
	}

#ifdef CONFIG_SYSFS
	ret = bL_switcher_sysfs_init();
	if (ret)
		pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

	return 0;
}

late_initcall(bL_switcher_init);