/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 *  arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>	/* for idle_task_exit */
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"
#include "offline_states.h"

/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
							CPU_STATE_OFFLINE;
static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;

static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;

static bool cede_offline_enabled __read_mostly = true;

/*
 * Enable/disable cede_offline when available.
 */
static int __init setup_cede_offline(char *str)
{
	return (kstrtobool(str, &cede_offline_enabled) == 0);
}

__setup("cede_offline=", setup_cede_offline);

enum cpu_state_vals get_cpu_current_state(int cpu)
{
	return per_cpu(current_state, cpu);
}

void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(current_state, cpu) = state;
}

enum cpu_state_vals get_preferred_offline_state(int cpu)
{
	return per_cpu(preferred_offline_state, cpu);
}

void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(preferred_offline_state, cpu) = state;
}

void set_default_offline_state(int cpu)
{
	per_cpu(preferred_offline_state, cpu) = default_offline_state;
}

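/*
 * Stop the current thread via the RTAS "stop-self" call.  Interrupts are
 * disabled and rtas_call_unlocked() is used because this call does not
 * return, so the normal RTAS lock could never be dropped.
 */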
static void rtas_stop_self(void)
{
	static struct rtas_args args;

	local_irq_disable();

	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

	printk("cpu %u (hwid %u) Ready to die...\n",
	       smp_processor_id(), hard_smp_processor_id());

	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

	panic("Alas, I survived.\n");
}

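/*
 * Self-destruct path run on the dying CPU.  If the preferred offline state
 * is CPU_STATE_INACTIVE, the thread cedes to the hypervisor in a loop so it
 * can be brought back online quickly; otherwise it falls through and stops
 * itself via RTAS.
 */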
static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
			while (!prep_irq_for_idle()) {
				local_irq_enable();
				local_irq_disable();
			}

			extended_cede_processor(cede_latency_hint);
		}

		local_irq_disable();

		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu);

			hard_irq_disable();
			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for(;;);
}

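/*
 * Runs on the CPU being offlined: mark it not-online, fix up boot_cpuid if
 * necessary, and migrate its interrupts to the remaining online CPUs.
 */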
static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/*fix boot_cpuid here*/
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	xics_migrate_irqs_away();
	return 0;
}

/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		cpu_status = 1;
		for (tries = 0; tries < 5000; tries++) {
			if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
				cpu_status = 0;
				break;
			}
			msleep(1);
		}
	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {

		for (tries = 0; tries < 25; tries++) {
			cpu_status = smp_query_cpu_stopped(pcpu);
			if (cpu_status == QCSS_STOPPED ||
			    cpu_status == QCSS_HARDWARE_ERROR)
				break;
			cpu_relax();
		}
	}

	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu.  If they were not they would be
	 * done here.  Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca[cpu].cpu_start = 0;
}

/*
 * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
	zalloc_cpumask_var(&tmp, GFP_KERNEL);

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
		       " supports %d logical cpus.\n", np->full_name,
		       num_possible_cpus());
		goto out_unlock;
	}

	while (!cpumask_empty(tmp))
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpumask_shift_left(tmp, tmp, nthreads);

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}
	err = 0;
out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}

/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();
}

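/*
 * Bring every hardware thread of the given cpu device node online by
 * mapping its "ibm,ppc-interrupt-server#s" entries to logical CPUs and
 * calling device_online() on each of them.
 */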
static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}

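/*
 * Return true if a child of @parent already carries an "ibm,my-drc-index"
 * property equal to @drc_index, i.e. the CPU is already represented in the
 * device tree.
 */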
static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
	struct device_node *child = NULL;
	u32 my_drc_index;
	bool found;
	int rc;

	/* Assume cpu doesn't exist */
	found = false;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_u32(child, "ibm,my-drc-index",
					  &my_drc_index);
		if (rc)
			continue;

		if (my_drc_index == drc_index) {
			of_node_put(child);
			found = true;
			break;
		}
	}

	return found;
}

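/*
 * Check that @drc_index appears in the parent node's "ibm,drc-indexes"
 * property, i.e. it names a CPU slot this partition could ever use.
 */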
static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
	bool found = false;
	int rc, index;

	index = 0;
	while (!found) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (drc == drc_index)
			found = true;
	}

	return found;
}

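/*
 * Hot-add a single CPU: validate the drc index, acquire the DRC from
 * firmware, build the device tree node via configure-connector, attach it,
 * and finally online its threads.  Each step unwinds the previous ones on
 * failure.
 */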
static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	of_node_put(parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name,
		 drc_index);
	return rc;
}

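/*
 * Take every thread of the given cpu node offline.  Threads that are merely
 * CPU_STATE_INACTIVE (ceded to the hypervisor) are prodded with H_PROD and
 * reaped with __cpu_die(); online threads go through device_offline().
 */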
static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu,
							    CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;

			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
			       != H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}

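/*
 * Hot-remove one CPU node: offline its threads, give the DRC back to
 * firmware, then detach the node from the device tree, re-onlining the CPU
 * if a later step fails.
 */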
static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
	int rc;

	pr_debug("Attempting to remove CPU %s, drc index: %x\n",
		 dn->name, drc_index);

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
			drc_index, dn->name, rc);
		dlpar_online_cpu(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		int saved_rc = rc;

		pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc);

		rc = dlpar_acquire_drc(drc_index);
		if (!rc)
			dlpar_online_cpu(dn);

		return saved_rc;
	}

	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
	return 0;
}

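/* Find the cpu device node whose "ibm,my-drc-index" matches @drc_index. */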
static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
	struct device_node *dn;
	u32 my_index;
	int rc;

	for_each_node_by_type(dn, "cpu") {
		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
		if (rc)
			continue;

		if (my_index == drc_index)
			break;
	}

	return dn;
}

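/* Resolve a drc index to its cpu device node and remove that CPU. */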
static int dlpar_cpu_remove_by_index(u32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = cpu_drc_index_to_dn(drc_index);
	if (!dn) {
		pr_warn("Cannot find CPU (drc index %x) to remove\n",
			drc_index);
		return -ENODEV;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);
	return rc;
}

static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
{
	struct device_node *dn;
	int cpus_found = 0;
	int rc;

	/* We want to find cpus_to_remove + 1 CPUs to ensure we do not
	 * remove the last CPU.
	 */
	for_each_node_by_type(dn, "cpu") {
		cpus_found++;

		if (cpus_found > cpus_to_remove) {
			of_node_put(dn);
			break;
		}

		/* Note that cpus_found is always 1 ahead of the index
		 * into the cpu_drcs array, so we use cpus_found - 1
		 */
		rc = of_property_read_u32(dn, "ibm,my-drc-index",
					  &cpu_drcs[cpus_found - 1]);
		if (rc) {
			pr_warn("Error occurred getting drc-index for %s\n",
				dn->name);
			of_node_put(dn);
			return -1;
		}
	}

	if (cpus_found < cpus_to_remove) {
		pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
			cpus_found, cpus_to_remove);
	} else if (cpus_found == cpus_to_remove) {
		pr_warn("Cannot remove all CPUs\n");
	}

	return cpus_found;
}

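/*
 * Remove @cpus_to_remove CPUs picked from the device tree.  If any removal
 * fails, the CPUs already removed are added back, making the request
 * all-or-nothing.
 */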
static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
{
	u32 *cpu_drcs;
	int cpus_found;
	int cpus_removed = 0;
	int i, rc;

	pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);

	cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
	if (cpus_found <= cpus_to_remove) {
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_remove; i++) {
		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
		if (rc)
			break;

		cpus_removed++;
	}

	if (cpus_removed != cpus_to_remove) {
		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");

		for (i = 0; i < cpus_removed; i++)
			dlpar_cpu_add(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}

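/*
 * Collect up to @cpus_to_add drc indexes from the /cpus node's
 * "ibm,drc-indexes" property that do not yet have a corresponding cpu node,
 * i.e. candidates for hot-add.
 */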
static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
{
	struct device_node *parent;
	int cpus_found = 0;
	int index, rc;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Could not find CPU root node in device tree\n");
		kfree(cpu_drcs);
		return -1;
	}

	/* Search the ibm,drc-indexes array for possible CPU drcs to
	 * add. Note that the format of the ibm,drc-indexes array is
	 * the number of entries in the array followed by the array
	 * of drc values so we start looking at index = 1.
	 */
	index = 1;
	while (cpus_found < cpus_to_add) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (dlpar_cpu_exists(parent, drc))
			continue;

		cpu_drcs[cpus_found++] = drc;
	}

	of_node_put(parent);
	return cpus_found;
}

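/*
 * Add @cpus_to_add CPUs from the pool of unused drc indexes.  On partial
 * failure, any CPUs that were added are removed again.
 */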
static int dlpar_cpu_add_by_count(u32 cpus_to_add)
{
	u32 *cpu_drcs;
	int cpus_added = 0;
	int cpus_found;
	int i, rc;

	pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);

	cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add);
	if (cpus_found < cpus_to_add) {
		pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
			cpus_found, cpus_to_add);
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_add; i++) {
		rc = dlpar_cpu_add(cpu_drcs[i]);
		if (rc)
			break;

		cpus_added++;
	}

	if (cpus_added < cpus_to_add) {
		pr_warn("CPU hot-add failed, removing any added CPUs\n");

		for (i = 0; i < cpus_added; i++)
			dlpar_cpu_remove_by_index(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}

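/*
 * Entry point for CPU hotplug error-log events: dispatch add/remove
 * requests by drc count or drc index under the device hotplug lock.
 */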
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_remove_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_remove_by_index(drc_index);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_add_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_add(drc_index);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_cpu_add(drc_index);

	return rc ? rc : count;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);

	return rc ? rc : count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

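/*
 * Device tree reconfiguration notifier: keep the present map and hard SMP
 * ids in sync as cpu nodes are attached to or detached from the tree.
 */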
static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_processor(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pseries_remove_processor(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};

#define MAX_CEDE_LATENCY_LEVELS		4
#define CEDE_LATENCY_PARAM_LENGTH	10
#define CEDE_LATENCY_PARAM_MAX_LENGTH	\
	(MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
#define CEDE_LATENCY_TOKEN		45

static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];

static int parse_cede_parameters(void)
{
	memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
	return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
			 NULL,
			 CEDE_LATENCY_TOKEN,
			 __pa(cede_parameters),
			 CEDE_LATENCY_PARAM_MAX_LENGTH);
}

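/*
 * Wire up the pseries CPU hotplug callbacks at boot, provided firmware
 * supports the "stop-self" and "query-cpu-stopped-state" RTAS calls.  On
 * LPAR, also register the reconfig notifier and pick the default offline
 * state (inactive/ceded if cede latency parameters are available).
 */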
static int __init pseries_cpu_hotplug_init(void)
{
	int cpu;
	int qcss_tok;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	rtas_stop_self_token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
			qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_INFO "CPU Hotplug not supported by firmware "
				"- disabling.\n");
		return 0;
	}

	ppc_md.cpu_die = pseries_mach_cpu_die;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		of_reconfig_notifier_register(&pseries_smp_nb);
		cpu_maps_update_begin();
		if (cede_offline_enabled && parse_cede_parameters() == 0) {
			default_offline_state = CPU_STATE_INACTIVE;
			for_each_online_cpu(cpu)
				set_default_offline_state(cpu);
		}
		cpu_maps_update_done();
	}

	return 0;
}
machine_arch_initcall(pseries, pseries_cpu_hotplug_init);