/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/cpufeature.h>
#include <linux/tick.h>

#include "base.h"

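/* Cache of the struct device registered for each possible CPU. */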
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
        /* ACPI style match is the only one that may succeed. */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
                                  unsigned int from_nid, unsigned int to_nid)
{
        int cpuid = cpu->dev.id;

        unregister_cpu_under_node(cpuid, from_nid);
        register_cpu_under_node(cpuid, to_nid);
        cpu->node_id = to_nid;
}

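/*
 * Bring a CPU online via cpu_up() and, if that moved it to a different
 * NUMA node, re-register it under the new node.
 */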
static int cpu_subsys_online(struct device *dev)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = dev->id;
        int from_nid, to_nid;
        int ret;

        from_nid = cpu_to_node(cpuid);
        if (from_nid == NUMA_NO_NODE)
                return -ENODEV;

        ret = cpu_up(cpuid);
        /*
         * When hot-adding memory to a memoryless node and onlining a cpu
         * on that node, the node number of the cpu may change internally.
         */
        to_nid = cpu_to_node(cpuid);
        if (from_nid != to_nid)
                change_cpu_under_node(cpu, from_nid, to_nid);

        return ret;
}

static int cpu_subsys_offline(struct device *dev)
{
        return cpu_down(dev->id);
}

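/*
 * unregister_cpu - remove a CPU device from sysfs
 * @cpu - the CPU device being removed
 *
 * Drops the node linkage, unregisters the device and clears the cached
 * per-cpu pointer so get_cpu_device() no longer returns it.
 */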
void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->dev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        device_unregister(&cpu->dev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
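/*
 * The "probe" and "release" files forward the request to the architecture
 * hooks arch_cpu_probe()/arch_cpu_release() while holding the device
 * hotplug lock.
 */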
static ssize_t cpu_probe_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf,
                               size_t count)
{
        ssize_t cnt;
        int ret;

        ret = lock_device_hotplug_sysfs();
        if (ret)
                return ret;

        cnt = arch_cpu_probe(buf, count);

        unlock_device_hotplug();
        return cnt;
}

static ssize_t cpu_release_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        ssize_t cnt;
        int ret;

        ret = lock_device_hotplug_sysfs();
        if (ret)
                return ret;

        cnt = arch_cpu_release(buf, count);

        unlock_device_hotplug();
        return cnt;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */

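/*
 * The bus behind /sys/devices/system/cpu.  CPU devices register on it, and
 * driver binding is decided by the ACPI match callback above.
 */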
struct bus_type cpu_subsys = {
        .name = "cpu",
        .dev_name = "cpu",
        .match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
        .online = cpu_subsys_online,
        .offline = cpu_subsys_offline,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

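/*
 * "crash_notes" exposes the physical address of this CPU's crash notes
 * buffer and "crash_notes_size" its size; kexec/kdump userspace reads them
 * when preparing the capture kernel's ELF core headers.
 */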
static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t rc;
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->dev.id;

        /*
         * Might be reading another cpu's data based on which cpu the read
         * thread has been scheduled on.  But cpu data (memory) is allocated
         * once during boot up and does not change thereafter, so this
         * operation is safe.  No locking required.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);

static ssize_t show_crash_notes_size(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        ssize_t rc;

        rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
        return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);

static struct attribute *crash_note_cpu_attrs[] = {
        &dev_attr_crash_notes.attr,
        &dev_attr_crash_notes_size.attr,
        NULL
};

static struct attribute_group crash_note_cpu_attr_group = {
        .attrs = crash_note_cpu_attrs,
};
#endif

#ifdef CONFIG_HOTPLUG_CPU

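/*
 * Per-CPU "isolate" file: reports whether this CPU is currently isolated
 * according to cpu_isolated(), the scheduler helper provided in this tree.
 */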
static ssize_t isolate_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t rc;
        int cpuid = cpu->dev.id;
        unsigned int isolated = cpu_isolated(cpuid);

        rc = snprintf(buf, PAGE_SIZE - 2, "%d\n", isolated);

        return rc;
}

static DEVICE_ATTR_RO(isolate);

static struct attribute *cpu_isolated_attrs[] = {
        &dev_attr_isolate.attr,
        NULL
};

static struct attribute_group cpu_isolated_attr_group = {
        .attrs = cpu_isolated_attrs,
};

#endif

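/*
 * Per-CPU "sched_load_boost" file: reads and writes the boost applied to
 * this CPU's scheduler load; accepted values are -100 to 1000.
 */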
static ssize_t show_sched_load_boost(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        ssize_t rc;
        unsigned int boost;
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = cpu->dev.id;

        boost = per_cpu(sched_load_boost, cpuid);
        rc = snprintf(buf, PAGE_SIZE - 2, "%d\n", boost);

        return rc;
}

static ssize_t __ref store_sched_load_boost(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
{
        int err;
        int boost;
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = cpu->dev.id;

        err = kstrtoint(strstrip((char *)buf), 0, &boost);
        if (err)
                return err;

        /*
         * -100 is low enough to cancel out the CPU's load and make it near
         * zero.  1000 is close to the maximum value that
         * cpu_util_freq_{walt,pelt} can take without overflow.
         */
        if (boost < -100 || boost > 1000)
                return -EINVAL;

        per_cpu(sched_load_boost, cpuid) = boost;

        return count;
}

static DEVICE_ATTR(sched_load_boost, 0644,
                   show_sched_load_boost,
                   store_sched_load_boost);

static struct attribute *sched_cpu_attrs[] = {
        &dev_attr_sched_load_boost.attr,
        NULL
};

static struct attribute_group sched_cpu_attr_group = {
        .attrs = sched_cpu_attrs,
};

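/*
 * Attribute groups attached to every CPU device at registration time, one
 * set for fixed CPUs and one for hotpluggable ones.
 */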
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
        &crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_HOTPLUG_CPU
        &cpu_isolated_attr_group,
#endif
        &sched_cpu_attr_group,
        NULL
};

static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
        &crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_HOTPLUG_CPU
        &cpu_isolated_attr_group,
#endif
        &sched_cpu_attr_group,
        NULL
};

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
        struct device_attribute attr;
        const struct cpumask *const map;
};

static ssize_t show_cpus_attr(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);

        return cpumap_print_to_pagebuf(true, buf, ca->map);
}

#define _CPU_ATTR(name, map) \
        { __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &__cpu_online_mask),
        _CPU_ATTR(possible, &__cpu_possible_mask),
        _CPU_ATTR(present, &__cpu_present_mask),
        _CPU_ATTR(core_ctl_isolated, &__cpu_isolated_mask),
};

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int n = snprintf(buf, PAGE_SIZE - 2, "%d\n", NR_CPUS - 1);
        return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE - 2;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline));
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                if (n && n < len)
                        buf[n++] = ',';

                if (nr_cpu_ids == total_cpus - 1)
                        n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
                else
                        n += snprintf(&buf[n], len - n, "%d-%d",
                                      nr_cpu_ids, total_cpus - 1);
        }

        n += snprintf(&buf[n], len - n, "\n");
        return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);

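/* "isolated" shows the boot-time isolated CPUs (cpu_isolated_map). */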
static ssize_t print_cpus_isolated(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE - 2;

        n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(cpu_isolated_map));

        return n;
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);

#ifdef CONFIG_NO_HZ_FULL
static ssize_t print_cpus_nohz_full(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE - 2;

        n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));

        return n;
}
static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
#endif

static void cpu_device_release(struct device *dev)
{
        /*
         * This is an empty function to prevent the driver core from spitting a
         * warning at us.  Yes, I know this is directly opposite of what the
         * documentation for the driver core and kobjects say, and the author
         * of this code has already been publicly ridiculed for doing
         * something as foolish as this.  However, at this point in time, it is
         * the only way to handle the issue of statically allocated cpu
         * devices.  The different architectures will have their cpu device
         * code reworked to properly handle this in the near future, so this
         * function will then be changed to correctly free up the memory held
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
         * on the linux-kernel list, you have been warned.
         */
}

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static ssize_t print_cpu_modalias(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        ssize_t n;
        u32 i;

        n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
                    CPU_FEATURE_TYPEVAL);

        for (i = 0; i < MAX_CPU_FEATURES; i++)
                if (cpu_have_feature(i)) {
                        if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
                                WARN(1, "CPU features overflow page\n");
                                break;
                        }
                        n += sprintf(&buf[n], ",%04X", i);
                }
        buf[n++] = '\n';
        return n;
}

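/*
 * Add a MODALIAS string built from the CPU feature bits to the uevent so
 * that userspace can autoload feature-specific modules.
 */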
static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);

        if (buf) {
                print_cpu_modalias(NULL, NULL, buf);
                add_uevent_var(env, "MODALIAS=%s", buf);
                kfree(buf);
        }
        return 0;
}
#endif

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *        sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        memset(&cpu->dev, 0x00, sizeof(struct device));
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
        cpu->dev.release = cpu_device_release;
        cpu->dev.offline_disabled = !cpu->hotpluggable;
        cpu->dev.offline = !cpu_online(num);
        cpu->dev.of_node = of_get_cpu_node(num, NULL);
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
        cpu->dev.bus->uevent = cpu_uevent;
#endif
        cpu->dev.groups = common_cpu_attr_groups;
        if (cpu->hotpluggable)
                cpu->dev.groups = hotplugable_cpu_attr_groups;
        error = device_register(&cpu->dev);
        if (error)
                return error;

        per_cpu(cpu_sys_devices, num) = &cpu->dev;
        register_cpu_under_node(num, cpu_to_node(num));

        return 0;
}

struct device *get_cpu_device(unsigned cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

static void device_create_release(struct device *dev)
{
        kfree(dev);
}

static struct device *
__cpu_device_create(struct device *parent, void *drvdata,
                    const struct attribute_group **groups,
                    const char *fmt, va_list args)
{
        struct device *dev = NULL;
        int retval = -ENODEV;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                retval = -ENOMEM;
                goto error;
        }

        device_initialize(dev);
        dev->parent = parent;
        dev->groups = groups;
        dev->release = device_create_release;
        dev_set_drvdata(dev, drvdata);

        retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
        if (retval)
                goto error;

        retval = device_add(dev);
        if (retval)
                goto error;

        return dev;

error:
        put_device(dev);
        return ERR_PTR(retval);
}

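/*
 * cpu_device_create - create and register a child device of a CPU
 * @parent - parent CPU device
 * @drvdata - driver data to set on the new device
 * @groups - attribute groups to attach
 * @fmt - printf-style device name
 *
 * Returns the new device on success or an ERR_PTR() on failure.
 */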
struct device *cpu_device_create(struct device *parent, void *drvdata,
                                 const struct attribute_group **groups,
                                 const char *fmt, ...)
{
        va_list vargs;
        struct device *dev;

        va_start(vargs, fmt);
        dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
        va_end(vargs);
        return dev;
}
EXPORT_SYMBOL_GPL(cpu_device_create);

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &dev_attr_probe.attr,
        &dev_attr_release.attr,
#endif
        &cpu_attrs[0].attr.attr,
        &cpu_attrs[1].attr.attr,
        &cpu_attrs[2].attr.attr,
        &cpu_attrs[3].attr.attr,
        &dev_attr_kernel_max.attr,
        &dev_attr_offline.attr,
        &dev_attr_isolated.attr,
#ifdef CONFIG_NO_HZ_FULL
        &dev_attr_nohz_full.attr,
#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
        &dev_attr_modalias.attr,
#endif
        NULL
};

static struct attribute_group cpu_root_attr_group = {
        .attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
        &cpu_root_attr_group,
        NULL,
};

bool cpu_is_hotpluggable(unsigned cpu)
{
        struct device *dev = get_cpu_device(cpu);

        return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

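/*
 * When the architecture selects CONFIG_GENERIC_CPU_DEVICES, register a CPU
 * device here for every possible CPU instead of in arch code.
 */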
static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
        int i;

        for_each_possible_cpu(i) {
                if (register_cpu(&per_cpu(cpu_devices, i), i))
                        panic("Failed to register CPU device");
        }
#endif
}

#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES

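/*
 * Default "Not affected" implementations of the vulnerability reporting
 * hooks; architectures override these __weak functions with the real state.
 */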
ssize_t __weak cpu_show_meltdown(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v1(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v2(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
                                          struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_l1tf(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_mds(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "Not affected\n");
}

static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
        &dev_attr_spectre_v1.attr,
        &dev_attr_spectre_v2.attr,
        &dev_attr_spec_store_bypass.attr,
        &dev_attr_l1tf.attr,
        &dev_attr_mds.attr,
        &dev_attr_tsx_async_abort.attr,
        &dev_attr_itlb_multihit.attr,
        NULL
};

static const struct attribute_group cpu_root_vulnerabilities_group = {
        .name = "vulnerabilities",
        .attrs = cpu_root_vulnerabilities_attrs,
};

static void __init cpu_register_vulnerabilities(void)
{
        if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
                               &cpu_root_vulnerabilities_group))
                pr_err("Unable to register CPU vulnerabilities\n");
}

#else
static inline void cpu_register_vulnerabilities(void) { }
#endif

void __init cpu_dev_init(void)
{
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");

        cpu_dev_register_generic();
        cpu_register_vulnerabilities();
}