/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sched_energy.h>

#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu capacity scale management
 */

/*
 * cpu capacity table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_capacity field so the scheduler
 * can take this difference into account during load balance. A per cpu
 * structure is preferred because each CPU updates its own cpu_capacity field
 * during the load balance except for idle cores. One idle core is selected
 * to run the rebalance_domains for all idle cores and the cpu_capacity can be
 * updated during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

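/*
 * Note: SCHED_CAPACITY_SCALE is 1 << SCHED_CAPACITY_SHIFT (i.e. 1024), so
 * the accessors below report 1024 for a full-capacity CPU and proportionally
 * less for slower cores.
 */
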
unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_power_scale(unsigned int cpu, unsigned long power)
{
	per_cpu(cpu_scale, cpu) = power;
}

unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
			of_node_put(cpu_node);
			return cpu;
		}
	}

	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

	of_node_put(cpu_node);
	return -EINVAL;
}

static int __init parse_core(struct device_node *core, int cluster_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].socket_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%s: Can't get CPU for thread\n",
				       t->full_name);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%s: Core has both threads and CPU\n",
			       core->full_name);
			return -EINVAL;
		}

		cpu_topology[cpu].socket_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int depth)
{
	static int cluster_id __initdata;
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%s: cpu-map children should be clusters\n",
				       c->full_name);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, cluster_id, core_id++);
			} else {
				pr_err("%s: Non-leaf cluster with core %s\n",
				       cluster->full_name, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%s: empty cluster\n", cluster->full_name);

	if (leaf)
		cluster_id++;

	return 0;
}

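/*
 * For reference, the parsers above walk a DT cpu-map as specified in
 * Documentation/devicetree/bindings/arm/topology.txt. A minimal sketch for
 * a two-cluster system (CPU phandle names are illustrative) could be:
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&CPU0>; };
 *			core1 { cpu = <&CPU1>; };
 *		};
 *		cluster1 {
 *			core0 { cpu = <&CPU2>; };
 *			core1 { cpu = <&CPU3>; };
 *		};
 *	};
 */
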
static DEFINE_PER_CPU(unsigned long, cpu_efficiency) = SCHED_CAPACITY_SCALE;

unsigned long arch_get_cpu_efficiency(int cpu)
{
	return per_cpu(cpu_efficiency, cpu);
}
EXPORT_SYMBOL(arch_get_cpu_efficiency);

#ifdef CONFIG_OF
struct cpu_efficiency {
	const char *compatible;
	unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
	{"arm,cortex-a15", 3891},
	{"arm,cortex-a7",  2048},
	{NULL, },
};

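/*
 * With the values above, a Cortex-A15 is rated 3891/2048 ~= 1.9 times as
 * efficient per clock as a Cortex-A7, so at equal clock rates its computed
 * capacity comes out roughly twice as large.
 */
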
static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)	__cpu_capacity[cpu]

static unsigned long middle_capacity = 1;

/*
 * Iterate all CPUs' descriptor in DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency
 * as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_capacity field such that an
 * 'average' CPU is of middle capacity. Also see the comments near
 * table_efficiency[] and update_cpu_capacity().
 */
static int __init parse_dt_topology(void)
{
	const struct cpu_efficiency *cpu_eff;
	struct device_node *cn = NULL, *map;
	unsigned long min_capacity = ULONG_MAX;
	unsigned long max_capacity = 0;
	unsigned long capacity = 0;
	int cpu = 0, ret = 0;

	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
				 GFP_NOWAIT);

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].socket_id == -1)
			ret = -EINVAL;

	for_each_possible_cpu(cpu) {
		const u32 *rate;
		int len;
		u32 efficiency;

		/* too early to use cpu->of_node */
		cn = of_get_cpu_node(cpu, NULL);
		if (!cn) {
			pr_err("missing device node for CPU %d\n", cpu);
			continue;
		}

		/*
		 * The CPU efficiency value passed from the device tree
		 * overrides the value defined in the table_efficiency[].
		 */
		if (of_property_read_u32(cn, "efficiency", &efficiency) < 0) {
			for (cpu_eff = table_efficiency;
			     cpu_eff->compatible; cpu_eff++)
				if (of_device_is_compatible(cn,
						cpu_eff->compatible))
					break;

			if (cpu_eff->compatible == NULL)
				continue;

			efficiency = cpu_eff->efficiency;
		}

		per_cpu(cpu_efficiency, cpu) = efficiency;

		rate = of_get_property(cn, "clock-frequency", &len);
		if (!rate || len != 4) {
			pr_err("%s missing clock-frequency property\n",
			       cn->full_name);
			continue;
		}

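		/*
		 * Capacity is (roughly) the clock rate in MHz (Hz >> 20)
		 * times the relative efficiency, e.g. an A7 at 1 GHz
		 * yields 953 * 2048.
		 */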
		capacity = ((be32_to_cpup(rate)) >> 20) * efficiency;

		/* Save min capacity of the system */
		if (capacity < min_capacity)
			min_capacity = capacity;

		/* Save max capacity of the system */
		if (capacity > max_capacity)
			max_capacity = capacity;

		cpu_capacity(cpu) = capacity;
	}

	/* If min and max capacities are equal, we bypass the update of the
	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
	 * compute a middle_capacity factor that will ensure that the capacity
	 * of an 'average' CPU of the system will be as close as possible to
	 * SCHED_CAPACITY_SCALE, which is the default value, but with the
	 * constraint explained near table_efficiency[].
	 */
	if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
		middle_capacity = (min_capacity + max_capacity)
				>> (SCHED_CAPACITY_SHIFT + 1);
	else
		middle_capacity = ((max_capacity / 3)
				>> (SCHED_CAPACITY_SHIFT - 1)) + 1;
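
	/*
	 * The condition above reduces to max_capacity < 3 * min_capacity.
	 * When the spread is larger than that, the else branch picks
	 * middle_capacity so that the fastest CPU scales to roughly
	 * 3*SCHED_CAPACITY_SCALE/2, honouring the constraint documented
	 * near table_efficiency[].
	 */
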
out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}

static const struct sched_group_energy * const cpu_core_energy(int cpu);

/*
 * Look for a custom capacity of a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for a heterogeneous system but
 * the function returns directly for an SMP system.
 */
static void update_cpu_capacity(unsigned int cpu)
{
	unsigned long capacity = SCHED_CAPACITY_SCALE;

	if (cpu_core_energy(cpu)) {
		int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1;

		capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap;
	}

	set_capacity_scale(cpu, capacity);

	pr_info("CPU%u: update cpu_capacity %lu\n",
		cpu, arch_scale_cpu_capacity(NULL, cpu));
}

#else
static inline int parse_dt_topology(void) { return 0; }
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

/*
 * The current assumption is that we can power gate each core independently.
 * This will be superseded by DT binding once available.
 */
const struct cpumask *cpu_corepower_mask(int cpu)
{
	return &cpu_topology[cpu].thread_sibling;
}

static void update_cpu_power(unsigned int cpu)
{
	if (!cpu_capacity(cpu))
		return;

	set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);

	pr_info("CPU%u: update cpu_power %lu\n",
		cpu, arch_scale_freq_power(NULL, cpu));
}

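/*
 * Worked example for the scaling above, using table_efficiency[] values and
 * illustrative clock rates: an A15 at 1.2 GHz gives
 * (1200000000 >> 20) * 3891 ~= 4451304, an A7 at 1 GHz gives
 * (1000000000 >> 20) * 2048 ~= 1951744. middle_capacity is then
 * (min + max) >> 11 ~= 3126, so cpu_power becomes ~1423 for the A15 and
 * ~624 for the A7, i.e. the 'average' CPU lands near SCHED_CAPACITY_SCALE.
 */
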
void update_cpu_power_capacity(int cpu)
{
	update_cpu_power(cpu);
	update_cpu_capacity(cpu);
}

static void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}

	smp_wmb(); /* Ensure masks are updated */
}

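/*
 * For reference, the ARMv7 MPIDR affinity fields decoded below are
 * Aff2 [23:16], Aff1 [15:8] and Aff0 [7:0]. On a typical big.LITTLE part
 * without multithreading, Aff1 is the cluster and Aff0 the core within it,
 * so e.g. an MPIDR of 0x80000101 maps to cluster 1, core 1.
 */
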
/*
 * store_cpu_topology is called at boot when only one cpu is running, and
 * with the mutex cpu_hotplug.lock held once several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	if (cpuid_topo->core_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system:
		 * the multiprocessor format & multiprocessor mode field are set
		 */

		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
		} else {
			/* largely independent cores */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		}
	} else {
		/*
		 * This is a uniprocessor system:
		 * either we are in multiprocessor format but running on a
		 * uniprocessor system, or in the old uniprocessor format
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->socket_id = -1;
	}

	pr_info("CPU%u: thread %d, cpu %d, cluster %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
	update_cpu_capacity(cpuid);
}

/* sd energy functions */
static inline
const struct sched_group_energy * const cpu_cluster_energy(int cpu)
{
	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];

	if (sched_is_energy_aware() && !sge) {
		pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu);
		return NULL;
	}

	return sge;
}

static inline
const struct sched_group_energy * const cpu_core_energy(int cpu)
{
	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0];

	if (sched_is_energy_aware() && !sge) {
		pr_warn("Invalid sched_group_energy for CPU%d\n", cpu);
		return NULL;
	}

	return sge;
}

static inline int cpu_corepower_flags(void)
{
	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN |
	       SD_SHARE_CAP_STATES;
}

static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_corepower_flags, cpu_core_energy, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, NULL, cpu_cluster_energy, SD_INIT_NAME(DIE) },
	{ NULL, },
};

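/*
 * Two levels are handed to the scheduler: MC spans the cores of a cluster
 * (cpu_coregroup_mask) and DIE spans all clusters (cpu_cpu_mask), each with
 * its sched_group_energy callback for energy-aware scheduling.
 */
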
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->socket_id = -1;

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
	}
	smp_wmb();
}

static void __init reset_cpu_capacity(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
}

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and capacity */
	reset_cpu_topology();
	reset_cpu_capacity();
	smp_wmb(); /* Ensure CPU topology and capacity are up to date */

	if (parse_dt_topology()) {
		reset_cpu_topology();
		reset_cpu_capacity();
	}

	for_each_possible_cpu(cpu)
		update_siblings_masks(cpu);

	/* Set scheduler topology descriptor */
	set_sched_topology(arm_topology);
	init_sched_energy_costs();
}