/* Copyright (c) 2014-2018, 2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "core_ctl: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/syscore_ops.h>

#include <trace/events/sched.h>
#include "sched.h"
#include "walt.h"

#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 2

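/*
 * struct cluster_data - per-cluster core control state.
 * One instance exists for each CPU cluster; the tunables below are
 * exposed through the cluster's "core_ctl" sysfs directory.
 */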
struct cluster_data {
	bool inited;
	unsigned int min_cpus;
	unsigned int max_cpus;
	unsigned int offline_delay_ms;
	unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int active_cpus;
	unsigned int num_cpus;
	unsigned int nr_isolated_cpus;
	unsigned int nr_not_preferred_cpus;
#ifdef CONFIG_SCHED_CORE_ROTATE
	unsigned long set_max;
	unsigned long set_cur;
#endif
	cpumask_t cpu_mask;
	unsigned int need_cpus;
	unsigned int task_thres;
	unsigned int max_nr;
	s64 need_ts;
	struct list_head lru;
	bool pending;
	spinlock_t pending_lock;
	bool is_big_cluster;
	bool enable;
	int nrrun;
	struct task_struct *core_ctl_thread;
	unsigned int first_cpu;
	unsigned int boost;
	struct kobject kobj;
};

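/* Per-CPU state, linked into the owning cluster's LRU list. */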
struct cpu_data {
	bool is_busy;
	unsigned int busy;
	unsigned int cpu;
	bool not_preferred;
	struct cluster_data *cluster;
	struct list_head sib;
	bool isolated_by_us;
};

static DEFINE_PER_CPU(struct cpu_data, cpu_state);
static struct cluster_data cluster_state[MAX_CLUSTERS];
static unsigned int num_clusters;

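/*
 * Iterate over the registered clusters. @idx must be initialized by
 * the caller and is advanced as a side effect of the loop.
 */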
#define for_each_cluster(cluster, idx) \
	for (; (idx) < num_clusters && ((cluster) = &cluster_state[idx]);\
		(idx)++)

static DEFINE_SPINLOCK(state_lock);
static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);
static void cpuset_next(struct cluster_data *cluster);

/* ========================= sysfs interface =========================== */

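/*
 * The attributes below appear under the cluster's first CPU, e.g.
 * (illustrative path, assuming CPU0 leads a cluster):
 *
 *	echo 2 > /sys/devices/system/cpu/cpu0/core_ctl/min_cpus
 *	cat /sys/devices/system/cpu/cpu0/core_ctl/global_state
 */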
static ssize_t store_min_cpus(struct cluster_data *state,
			      const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->min_cpus = min(val, state->max_cpus);
	cpuset_next(state);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
}

static ssize_t store_max_cpus(struct cluster_data *state,
			      const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	val = min(val, state->num_cpus);
	state->max_cpus = val;
	state->min_cpus = min(state->min_cpus, state->max_cpus);
	cpuset_next(state);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
}

static ssize_t store_offline_delay_ms(struct cluster_data *state,
				      const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->offline_delay_ms = val;
	apply_need(state);

	return count;
}

static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
}

static ssize_t store_task_thres(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	if (val < state->num_cpus)
		return -EINVAL;

	state->task_thres = val;
	apply_need(state);

	return count;
}

static ssize_t show_offline_delay_ms(const struct cluster_data *state,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
}

static ssize_t store_busy_up_thres(struct cluster_data *state,
				   const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
		     &val[0], &val[1], &val[2], &val[3],
		     &val[4], &val[5]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_up_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_busy_down_thres(struct cluster_data *state,
				     const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
		     &val[0], &val[1], &val[2], &val[3],
		     &val[4], &val[5]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_down_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_is_big_cluster(struct cluster_data *state,
				    const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->is_big_cluster = val ? 1 : 0;
	return count;
}

static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}

static ssize_t store_enable(struct cluster_data *state,
			    const char *buf, size_t count)
{
	unsigned int val;
	bool bval;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	bval = !!val;
	if (bval != state->enable) {
		state->enable = bval;
		apply_need(state);
	}

	return count;
}

static ssize_t show_enable(const struct cluster_data *state, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", state->enable);
}

static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
}

static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
}

static ssize_t show_global_state(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	struct cluster_data *cluster;
	ssize_t count = 0;
	unsigned int cpu;

	spin_lock_irq(&state_lock);
	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;
		if (!cluster || !cluster->inited)
			continue;

		count += snprintf(buf + count, PAGE_SIZE - count,
				  "CPU%u\n", cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tCPU: %u\n", c->cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tOnline: %u\n",
				  cpu_online(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tIsolated: %u\n",
				  cpu_isolated(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tFirst CPU: %u\n",
				  cluster->first_cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tBusy%%: %u\n", c->busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tIs busy: %u\n", c->is_busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNot preferred: %u\n",
				  c->not_preferred);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNr running: %u\n", cluster->nrrun);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tActive CPUs: %u\n",
				  get_active_cpu_count(cluster));
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNeed CPUs: %u\n", cluster->need_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNr isolated CPUs: %u\n",
				  cluster->nr_isolated_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tBoost: %u\n", (unsigned int) cluster->boost);
	}
	spin_unlock_irq(&state_lock);

	return count;
}

static ssize_t store_not_preferred(struct cluster_data *state,
				   const char *buf, size_t count)
{
	struct cpu_data *c;
	unsigned int i;
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	unsigned long flags;
	int ret;
	int not_preferred_count = 0;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
		     &val[0], &val[1], &val[2], &val[3],
		     &val[4], &val[5]);
	if (ret != state->num_cpus)
		return -EINVAL;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		c->not_preferred = val[i];
		not_preferred_count += !!val[i];
	}
	state->nr_not_preferred_cpus = not_preferred_count;
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "CPU#%d: %u\n", c->cpu, c->not_preferred);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

struct core_ctl_attr {
	struct attribute attr;
	ssize_t (*show)(const struct cluster_data *, char *);
	ssize_t (*store)(struct cluster_data *, const char *, size_t count);
};

#define core_ctl_attr_ro(_name) \
static struct core_ctl_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define core_ctl_attr_rw(_name) \
static struct core_ctl_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

core_ctl_attr_rw(min_cpus);
core_ctl_attr_rw(max_cpus);
core_ctl_attr_rw(offline_delay_ms);
core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);
core_ctl_attr_rw(enable);

static struct attribute *default_attrs[] = {
	&min_cpus.attr,
	&max_cpus.attr,
	&offline_delay_ms.attr,
	&busy_up_thres.attr,
	&busy_down_thres.attr,
	&task_thres.attr,
	&is_big_cluster.attr,
	&enable.attr,
	&need_cpus.attr,
	&active_cpus.attr,
	&global_state.attr,
	&not_preferred.attr,
	NULL
};

#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->show)
		ret = cattr->show(data, buf);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->store)
		ret = cattr->store(data, buf, count);

	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_core_ctl = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
};

/* ==================== runqueue based core count =================== */

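/* Snapshot the scheduler's per-window runqueue averages and cache them
 * per cluster. */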
static void update_running_avg(void)
{
	int avg, iowait_avg, big_avg;
	int max_nr, big_max_nr;
	struct cluster_data *cluster;
	unsigned int index = 0;
	unsigned long flags;

	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
				 &max_nr, &big_max_nr);
	walt_rotation_checkpoint(big_avg);

	spin_lock_irqsave(&state_lock, flags);
	for_each_cluster(cluster, index) {
		if (!cluster->inited)
			continue;
		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
		cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr;
	}
	spin_unlock_irqrestore(&state_lock, flags);
}

#define MAX_NR_THRESHOLD 4
/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
				    unsigned int new_need)
{
	/* unisolate all cores if there are enough tasks */
	if (cluster->nrrun >= cluster->task_thres)
		return cluster->num_cpus;

	/* only unisolate more cores if there are tasks to run */
	if (cluster->nrrun > new_need)
		new_need = new_need + 1;

	/*
	 * We don't want tasks to be overcrowded in a cluster.
	 * If any CPU had more than MAX_NR_THRESHOLD tasks in the last
	 * window, bring another CPU to help out.
	 */
	if (cluster->max_nr > MAX_NR_THRESHOLD)
		new_need = new_need + 1;

	return new_need;
}

/* ======================= load based core count ====================== */

static unsigned int apply_limits(const struct cluster_data *cluster,
				 unsigned int need_cpus)
{
	return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
}

static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
{
	return cluster->num_cpus -
		sched_isolate_count(&cluster->cpu_mask, true);
}

static bool is_active(const struct cpu_data *state)
{
	return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
}

static bool adjustment_possible(const struct cluster_data *cluster,
				unsigned int need)
{
	return (need < cluster->active_cpus || (need > cluster->active_cpus &&
						cluster->nr_isolated_cpus));
}

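/*
 * Evaluate the number of CPUs this cluster needs. Boost (or a disabled
 * cluster) forces max_cpus; otherwise CPUs are counted against the busy
 * up/down thresholds and the result is adjusted for runqueue depth.
 * A request for more CPUs than are currently active is honored
 * immediately; dropping CPUs is delayed by offline_delay_ms.
 */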
static bool eval_need(struct cluster_data *cluster)
{
	unsigned long flags;
	struct cpu_data *c;
	unsigned int need_cpus = 0, last_need, thres_idx;
	int ret = 0;
	bool need_flag = false;
	unsigned int new_need;
	s64 now, elapsed;

	if (unlikely(!cluster->inited))
		return 0;

	spin_lock_irqsave(&state_lock, flags);

	if (cluster->boost || !cluster->enable) {
		need_cpus = cluster->max_cpus;
	} else {
		cluster->active_cpus = get_active_cpu_count(cluster);
		thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
		list_for_each_entry(c, &cluster->lru, sib) {
			bool old_is_busy = c->is_busy;

			if (c->busy >= cluster->busy_up_thres[thres_idx] ||
			    sched_cpu_high_irqload(c->cpu))
				c->is_busy = true;
			else if (c->busy < cluster->busy_down_thres[thres_idx])
				c->is_busy = false;

			trace_core_ctl_set_busy(c->cpu, c->busy, old_is_busy,
						c->is_busy);
			need_cpus += c->is_busy;
		}
		need_cpus = apply_task_need(cluster, need_cpus);
	}
	new_need = apply_limits(cluster, need_cpus);
	need_flag = adjustment_possible(cluster, new_need);

	last_need = cluster->need_cpus;
	now = ktime_to_ms(ktime_get());

	if (new_need > cluster->active_cpus) {
		ret = 1;
	} else {
		/*
		 * When there is no change in need and there are no more
		 * active CPUs than currently needed, just update the
		 * need time stamp and return.
		 */
		if (new_need == last_need && new_need == cluster->active_cpus) {
			cluster->need_ts = now;
			spin_unlock_irqrestore(&state_lock, flags);
			return 0;
		}

		elapsed = now - cluster->need_ts;
		ret = elapsed >= cluster->offline_delay_ms;
	}

	if (ret) {
		cluster->need_ts = now;
		cluster->need_cpus = new_need;
	}
	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
				 ret && need_flag);
	spin_unlock_irqrestore(&state_lock, flags);

	return ret && need_flag;
}

static void apply_need(struct cluster_data *cluster)
{
	if (eval_need(cluster))
		wake_up_core_ctl_thread(cluster);
}

/* ========================= core count enforcement ==================== */

static void wake_up_core_ctl_thread(struct cluster_data *cluster)
{
	unsigned long flags;

	spin_lock_irqsave(&cluster->pending_lock, flags);
	cluster->pending = true;
	spin_unlock_irqrestore(&cluster->pending_lock, flags);
	wake_up_process(cluster->core_ctl_thread);
}

static u64 core_ctl_check_timestamp;

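/*
 * Boost requests are reference counted: each core_ctl_set_boost(true)
 * must be paired with a core_ctl_set_boost(false), and every cluster
 * stays at max_cpus until the count drops back to zero.
 */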
int core_ctl_set_boost(bool boost)
{
	unsigned int index = 0;
	struct cluster_data *cluster;
	unsigned long flags;
	int ret = 0;
	bool boost_state_changed = false;

	if (unlikely(!initialized))
		return 0;

	spin_lock_irqsave(&state_lock, flags);
	for_each_cluster(cluster, index) {
		if (boost) {
			boost_state_changed = !cluster->boost;
			++cluster->boost;
		} else {
			if (!cluster->boost) {
				pr_err("Error turning off boost. Boost already turned off\n");
				ret = -EINVAL;
				break;
			} else {
				--cluster->boost;
				boost_state_changed = !cluster->boost;
			}
		}
	}
	spin_unlock_irqrestore(&state_lock, flags);

	if (boost_state_changed) {
		index = 0;
		for_each_cluster(cluster, index)
			apply_need(cluster);
	}

	trace_core_ctl_set_boost(cluster->boost, ret);

	return ret;
}
EXPORT_SYMBOL(core_ctl_set_boost);

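/*
 * Called with the start time of the current scheduler window; repeat
 * calls for the same window are ignored, so each window is evaluated
 * at most once.
 */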
void core_ctl_check(u64 window_start)
{
	int cpu;
	struct cpu_data *c;
	struct cluster_data *cluster;
	unsigned int index = 0;
	unsigned long flags;

	if (unlikely(!initialized))
		return;

	if (window_start == core_ctl_check_timestamp)
		return;

	core_ctl_check_timestamp = window_start;

	spin_lock_irqsave(&state_lock, flags);
	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;

		if (!cluster || !cluster->inited)
			continue;

		c->busy = sched_get_cpu_util(cpu);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	update_running_avg();

	for_each_cluster(cluster, index) {
		if (eval_need(cluster))
			wake_up_core_ctl_thread(cluster);
	}
}

static void move_cpu_lru(struct cpu_data *cpu_data)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_del(&cpu_data->sib);
	list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
	spin_unlock_irqrestore(&state_lock, flags);
}

#ifdef CONFIG_SCHED_CORE_ROTATE
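/*
 * Worked example: with num_cpus = 4 and min_cpus = 2, cpus_needed = 2,
 * so cpuset_next() advances set_cur to the next value with exactly two
 * bits set, e.g. 0x3, making CPUs first_cpu and first_cpu + 1 the
 * isolation candidates for this cycle.
 */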
static void cpuset_next(struct cluster_data *cluster)
{
	int cpus_needed = cluster->num_cpus - cluster->min_cpus;

	cluster->set_cur++;
	cluster->set_cur = min(cluster->set_cur, cluster->set_max);

	/*
	 * Walk the candidate bit sets in [0, set_max), starting from
	 * set_cur and wrapping around, and stop at the first set whose
	 * weight (number of set bits) equals cpus_needed.
	 */
	while (1) {
		if (bitmap_weight(&cluster->set_cur, BITS_PER_LONG) ==
		    cpus_needed) {
			break;
		}
		cluster->set_cur++;
		cluster->set_cur = min(cluster->set_cur, cluster->set_max);
		if (cluster->set_cur == cluster->set_max)
			/* roll over */
			cluster->set_cur = 0;
	}

	pr_debug("first_cpu=%d cpus_needed=%d set_cur=0x%lx\n",
		 cluster->first_cpu, cpus_needed, cluster->set_cur);
}

static bool should_we_isolate(int cpu, struct cluster_data *cluster)
{
	/* cpu should be part of cluster */
	return !!(cluster->set_cur & (1 << (cpu - cluster->first_cpu)));
}

static void core_ctl_resume(void)
{
	unsigned int i = 0;
	struct cluster_data *cluster;

	/* move to next isolation cpu set */
	for_each_cluster(cluster, i)
		cpuset_next(cluster);
}

static struct syscore_ops core_ctl_syscore_ops = {
	.resume = core_ctl_resume,
};

#else

static void cpuset_next(struct cluster_data *cluster) { }

static bool should_we_isolate(int cpu, struct cluster_data *cluster)
{
	return true;
}

#endif

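/*
 * Isolation beyond max_cpus runs in up to two passes: the first pass
 * considers only CPUs marked not_preferred (when any are set); if the
 * cluster is still above max_cpus, a second pass may isolate the
 * remaining, preferred CPUs as well.
 */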
static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_isolated = 0;
	bool first_pass = cluster->nr_not_preferred_cpus;

	/*
	 * Protect against an entry being removed (and re-added at the
	 * tail) by another thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus == need)
			break;
		/* Don't isolate busy CPUs. */
		if (c->is_busy)
			continue;

		/*
		 * We isolate only the not_preferred CPUs. If none
		 * of the CPUs are selected as not_preferred, then
		 * all CPUs are eligible for isolation.
		 */
		if (cluster->nr_not_preferred_cpus && !c->not_preferred)
			continue;

		if (!should_we_isolate(c->cpu, cluster))
			continue;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);

again:
	/*
	 * If the number of active CPUs is within the limits, then
	 * don't force isolation of any busy CPUs.
	 */
	if (cluster->active_cpus <= cluster->max_cpus)
		return;

	nr_isolated = 0;
	num_cpus = cluster->num_cpus;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus <= cluster->max_cpus)
			break;

		if (first_pass && !c->not_preferred)
			continue;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);

	if (first_pass && cluster->active_cpus > cluster->max_cpus) {
		first_pass = false;
		goto again;
	}
}

static void __try_to_unisolate(struct cluster_data *cluster,
			       unsigned int need, bool force)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_unisolated = 0;

	/*
	 * Protect against an entry being removed (and re-added at the
	 * tail) by another thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!c->isolated_by_us)
			continue;
		if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
			(!force && c->not_preferred))
			continue;
		if (cluster->active_cpus == need)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to unisolate CPU%u\n", c->cpu);
		if (!sched_unisolate_cpu(c->cpu)) {
			c->isolated_by_us = false;
			move_cpu_lru(c);
			nr_unisolated++;
		} else {
			pr_debug("Unable to unisolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus -= nr_unisolated;
	spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
{
	bool force_use_non_preferred = false;

	__try_to_unisolate(cluster, need, force_use_non_preferred);

	if (cluster->active_cpus == need)
		return;

	force_use_non_preferred = true;
	__try_to_unisolate(cluster, need, force_use_non_preferred);
}

static void __ref do_core_ctl(struct cluster_data *cluster)
{
	unsigned int need;

	need = apply_limits(cluster, cluster->need_cpus);

	if (adjustment_possible(cluster, need)) {
		pr_debug("Trying to adjust group %u from %u to %u\n",
			 cluster->first_cpu, cluster->active_cpus, need);

		if (cluster->active_cpus > need)
			try_to_isolate(cluster, need);
		else if (cluster->active_cpus < need)
			try_to_unisolate(cluster, need);
	}
}

static int __ref try_core_ctl(void *data)
{
	struct cluster_data *cluster = data;
	unsigned long flags;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&cluster->pending_lock, flags);
		if (!cluster->pending) {
			spin_unlock_irqrestore(&cluster->pending_lock, flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&cluster->pending_lock, flags);
		}
		set_current_state(TASK_RUNNING);
		cluster->pending = false;
		spin_unlock_irqrestore(&cluster->pending_lock, flags);

		do_core_ctl(cluster);
	}

	return 0;
}

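/*
 * Hotplug callback: keep the LRU ordering and isolation accounting
 * consistent as CPUs come online or go offline, then wake the core
 * control thread if an adjustment became possible.
 */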
static int isolation_cpuhp_state(unsigned int cpu, bool online)
{
	struct cpu_data *state = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = state->cluster;
	unsigned int need;
	bool do_wakeup = false, unisolated = false;
	unsigned long flags;

	if (unlikely(!cluster || !cluster->inited))
		return 0;

	if (online) {
		cluster->active_cpus = get_active_cpu_count(cluster);

		/*
		 * Moving to the end of the list should only happen in
		 * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an
		 * infinite list traversal when thermal (or other entities)
		 * reject trying to online CPUs.
		 */
		move_cpu_lru(state);
	} else {
		/*
		 * We don't want to have a CPU both offline and isolated.
		 * So unisolate a CPU that went down if it was isolated by us.
		 */
		if (state->isolated_by_us) {
			sched_unisolate_cpu_unlocked(cpu);
			state->isolated_by_us = false;
			unisolated = true;
		}

		/* Move a CPU to the end of the LRU when it goes offline. */
		move_cpu_lru(state);

		state->busy = 0;
		cluster->active_cpus = get_active_cpu_count(cluster);
	}

	need = apply_limits(cluster, cluster->need_cpus);
	spin_lock_irqsave(&state_lock, flags);
	if (unisolated)
		cluster->nr_isolated_cpus--;
	do_wakeup = adjustment_possible(cluster, need);
	spin_unlock_irqrestore(&state_lock, flags);
	if (do_wakeup)
		wake_up_core_ctl_thread(cluster);

	return 0;
}

static int core_ctl_isolation_online_cpu(unsigned int cpu)
{
	return isolation_cpuhp_state(cpu, true);
}

static int core_ctl_isolation_dead_cpu(unsigned int cpu)
{
	return isolation_cpuhp_state(cpu, false);
}

/* ============================ init code ============================== */

static cpumask_var_t core_ctl_disable_cpumask;
static bool core_ctl_disable_cpumask_present;

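/*
 * Example (illustrative): booting with "core_ctl_disable_cpumask=4-7"
 * disables core control for a cluster made up of CPUs 4-7.
 */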
static int __init core_ctl_disable_setup(char *str)
{
	if (!*str)
		return -EINVAL;

	alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);

	if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
		free_bootmem_cpumask_var(core_ctl_disable_cpumask);
		return -EINVAL;
	}

	core_ctl_disable_cpumask_present = true;
	pr_info("disable_cpumask=%*pbl\n",
		cpumask_pr_args(core_ctl_disable_cpumask));

	return 0;
}
early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);

static bool should_skip(const struct cpumask *mask)
{
	if (!core_ctl_disable_cpumask_present)
		return false;

	/*
	 * We operate on a cluster basis. Disable core_ctl for a cluster
	 * only if all of its CPUs are specified in
	 * core_ctl_disable_cpumask.
	 */
	return cpumask_subset(mask, core_ctl_disable_cpumask);
}

static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
	unsigned int i;

	for (i = 0; i < num_clusters; ++i) {
		if (cluster_state[i].first_cpu == first_cpu)
			return &cluster_state[i];
	}

	return NULL;
}

static int cluster_init(const struct cpumask *mask)
{
	struct device *dev;
	unsigned int first_cpu = cpumask_first(mask);
	struct cluster_data *cluster;
	struct cpu_data *state;
	unsigned int cpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	if (should_skip(mask))
		return 0;

	if (find_cluster_by_first_cpu(first_cpu))
		return 0;

	dev = get_cpu_device(first_cpu);
	if (!dev)
		return -ENODEV;

	pr_info("Creating CPU group %d\n", first_cpu);

	if (num_clusters == MAX_CLUSTERS) {
		pr_err("Unsupported number of clusters. Only %u supported\n",
		       MAX_CLUSTERS);
		return -EINVAL;
	}
	cluster = &cluster_state[num_clusters];
	++num_clusters;

	cpumask_copy(&cluster->cpu_mask, mask);
	cluster->num_cpus = cpumask_weight(mask);
	if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
		pr_err("HW configuration not supported\n");
		return -EINVAL;
	}
	cluster->first_cpu = first_cpu;
	cluster->min_cpus = 1;
	cluster->max_cpus = cluster->num_cpus;
	cluster->need_cpus = cluster->num_cpus;
	cluster->offline_delay_ms = 100;
	cluster->task_thres = UINT_MAX;
	cluster->nrrun = cluster->num_cpus;
#ifdef CONFIG_SCHED_CORE_ROTATE
	cluster->set_max = cluster->num_cpus * cluster->num_cpus;
	/* by default mark all cpus as eligible */
	cluster->set_cur = cluster->set_max - 1;
#endif
	cluster->enable = true;
	cluster->nr_not_preferred_cpus = 0;
	INIT_LIST_HEAD(&cluster->lru);
	spin_lock_init(&cluster->pending_lock);

	for_each_cpu(cpu, mask) {
		pr_info("Init CPU%u state\n", cpu);

		state = &per_cpu(cpu_state, cpu);
		state->cluster = cluster;
		state->cpu = cpu;
		list_add_tail(&state->sib, &cluster->lru);
	}
	cluster->active_cpus = get_active_cpu_count(cluster);

	cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
					       "core_ctl/%d", first_cpu);
	if (IS_ERR(cluster->core_ctl_thread))
		return PTR_ERR(cluster->core_ctl_thread);

	sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
				   &param);

	cluster->inited = true;

	kobject_init(&cluster->kobj, &ktype_core_ctl);
	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}

static int __init core_ctl_init(void)
{
	unsigned int cpu;
	struct cpumask cpus = *cpu_possible_mask;

	if (should_skip(cpu_possible_mask))
		return 0;

#ifdef CONFIG_SCHED_CORE_ROTATE
	register_syscore_ops(&core_ctl_syscore_ops);
#endif

	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
				  "core_ctl/isolation:online",
				  core_ctl_isolation_online_cpu, NULL);

	cpuhp_setup_state_nocalls(CPUHP_CORE_CTL_ISOLATION_DEAD,
				  "core_ctl/isolation:dead",
				  NULL, core_ctl_isolation_dead_cpu);

	for_each_cpu(cpu, &cpus) {
		int ret;
		const struct cpumask *cluster_cpus = cpu_coregroup_mask(cpu);

		ret = cluster_init(cluster_cpus);
		if (ret)
			pr_warn("unable to create core ctl group: %d\n", ret);
		cpumask_andnot(&cpus, &cpus, cluster_cpus);
	}
	initialized = true;
	return 0;
}

late_initcall(core_ctl_init);