/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "core_ctl: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/syscore_ops.h>

#include <trace/events/sched.h>

#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 2

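/*
 * Per-cluster state. core_ctl tracks each CPU cluster separately and
 * decides, from per-CPU busy percentages and runqueue statistics, how
 * many of the cluster's CPUs should stay active; the remainder are
 * isolated from the scheduler.
 */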
struct cluster_data {
	bool inited;
	unsigned int min_cpus;
	unsigned int max_cpus;
	unsigned int offline_delay_ms;
	unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int active_cpus;
	unsigned int num_cpus;
	unsigned int nr_isolated_cpus;
#ifdef CONFIG_SCHED_CORE_ROTATE
	unsigned long set_max;
	unsigned long set_cur;
#endif
	cpumask_t cpu_mask;
	unsigned int need_cpus;
	unsigned int task_thres;
	unsigned int max_nr;
	s64 need_ts;
	struct list_head lru;
	bool pending;
	spinlock_t pending_lock;
	bool is_big_cluster;
	bool enable;
	int nrrun;
	struct task_struct *core_ctl_thread;
	unsigned int first_cpu;
	unsigned int boost;
	struct kobject kobj;
};

struct cpu_data {
	bool is_busy;
	unsigned int busy;
	unsigned int cpu;
	bool not_preferred;
	struct cluster_data *cluster;
	struct list_head sib;
	bool isolated_by_us;
};

static DEFINE_PER_CPU(struct cpu_data, cpu_state);
static struct cluster_data cluster_state[MAX_CLUSTERS];
static unsigned int num_clusters;

#define for_each_cluster(cluster, idx) \
	for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
		(idx)++, (cluster) = &cluster_state[idx])

static DEFINE_SPINLOCK(state_lock);
static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);
static void cpuset_next(struct cluster_data *cluster);

/* ========================= sysfs interface =========================== */

static ssize_t store_min_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->min_cpus = min(val, state->max_cpus);
	cpuset_next(state);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
}

static ssize_t store_max_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	val = min(val, state->num_cpus);
	state->max_cpus = val;
	state->min_cpus = min(state->min_cpus, state->max_cpus);
	cpuset_next(state);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
}

static ssize_t store_offline_delay_ms(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->offline_delay_ms = val;
	apply_need(state);

	return count;
}

static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
}

static ssize_t store_task_thres(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	if (val < state->num_cpus)
		return -EINVAL;

	state->task_thres = val;
	apply_need(state);

	return count;
}

static ssize_t show_offline_delay_ms(const struct cluster_data *state,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
}

static ssize_t store_busy_up_thres(struct cluster_data *state,
				   const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
			&val[0], &val[1], &val[2], &val[3],
			&val[4], &val[5]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_up_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_busy_down_thres(struct cluster_data *state,
				     const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
			&val[0], &val[1], &val[2], &val[3],
			&val[4], &val[5]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_down_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_is_big_cluster(struct cluster_data *state,
				    const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->is_big_cluster = val ? 1 : 0;
	return count;
}

static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}

static ssize_t store_enable(struct cluster_data *state,
			    const char *buf, size_t count)
{
	unsigned int val;
	bool bval;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	bval = !!val;
	if (bval != state->enable) {
		state->enable = bval;
		apply_need(state);
	}

	return count;
}

static ssize_t show_enable(const struct cluster_data *state, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", state->enable);
}

static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
}

static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
}

static ssize_t show_global_state(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	struct cluster_data *cluster;
	ssize_t count = 0;
	unsigned int cpu;

	spin_lock_irq(&state_lock);
	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;
		if (!cluster || !cluster->inited)
			continue;

		count += snprintf(buf + count, PAGE_SIZE - count,
					"CPU%u\n", cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tCPU: %u\n", c->cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tOnline: %u\n",
					cpu_online(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tIsolated: %u\n",
					cpu_isolated(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tFirst CPU: %u\n",
					cluster->first_cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tBusy%%: %u\n", c->busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tIs busy: %u\n", c->is_busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNot preferred: %u\n",
					c->not_preferred);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNr running: %u\n", cluster->nrrun);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tActive CPUs: %u\n", get_active_cpu_count(cluster));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNeed CPUs: %u\n", cluster->need_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNr isolated CPUs: %u\n",
					cluster->nr_isolated_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tBoost: %u\n", (unsigned int) cluster->boost);
	}
	spin_unlock_irq(&state_lock);

	return count;
}

static ssize_t store_not_preferred(struct cluster_data *state,
				   const char *buf, size_t count)
{
	struct cpu_data *c;
	unsigned int i;
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	unsigned long flags;
	int ret;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
			&val[0], &val[1], &val[2], &val[3],
			&val[4], &val[5]);
	if (ret != state->num_cpus)
		return -EINVAL;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		c->not_preferred = val[i];
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		count += scnprintf(buf + count, PAGE_SIZE - count,
				"CPU#%d: %u\n", c->cpu, c->not_preferred);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

struct core_ctl_attr {
	struct attribute attr;
	ssize_t (*show)(const struct cluster_data *, char *);
	ssize_t (*store)(struct cluster_data *, const char *, size_t count);
};

#define core_ctl_attr_ro(_name) \
static struct core_ctl_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define core_ctl_attr_rw(_name) \
static struct core_ctl_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

core_ctl_attr_rw(min_cpus);
core_ctl_attr_rw(max_cpus);
core_ctl_attr_rw(offline_delay_ms);
core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);
core_ctl_attr_rw(enable);

static struct attribute *default_attrs[] = {
	&min_cpus.attr,
	&max_cpus.attr,
	&offline_delay_ms.attr,
	&busy_up_thres.attr,
	&busy_down_thres.attr,
	&task_thres.attr,
	&is_big_cluster.attr,
	&enable.attr,
	&need_cpus.attr,
	&active_cpus.attr,
	&global_state.attr,
	&not_preferred.attr,
	NULL
};

#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->show)
		ret = cattr->show(data, buf);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->store)
		ret = cattr->store(data, buf, count);

	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_core_ctl = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
};
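
/*
 * Each cluster's attributes are published via the kobject created in
 * cluster_init(), which (assuming the usual CPU device layout) lands
 * under /sys/devices/system/cpu/cpuN/core_ctl/, where N is the
 * cluster's first CPU. For example, "echo 2 > .../core_ctl/min_cpus"
 * asks core_ctl to keep at least two of the cluster's CPUs unisolated.
 */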

/* ==================== runqueue based core count =================== */

static void update_running_avg(void)
{
	int avg, iowait_avg, big_avg;
	int max_nr, big_max_nr;
	struct cluster_data *cluster;
	unsigned int index = 0;

	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
				 &max_nr, &big_max_nr);

	for_each_cluster(cluster, index) {
		if (!cluster->inited)
			continue;
		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
		cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr;
	}
}

#define MAX_NR_THRESHOLD 4
/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
				    unsigned int new_need)
{
	/* unisolate all cores if there are enough tasks */
	if (cluster->nrrun >= cluster->task_thres)
		return cluster->num_cpus;

	/* only unisolate more cores if there are tasks to run */
	if (cluster->nrrun > new_need)
		new_need = new_need + 1;

	/*
	 * We don't want tasks to be overcrowded in a cluster.
	 * If any CPU has more than MAX_NR_THRESHOLD tasks in the last
	 * window, bring another CPU to help out.
	 */
	if (cluster->max_nr > MAX_NR_THRESHOLD)
		new_need = new_need + 1;

	return new_need;
}
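
/*
 * Worked example (illustrative numbers): with nrrun = 3 and two CPUs
 * requested by the busy-percentage votes, apply_task_need() raises
 * the need to 3 because there are more runnable tasks than requested
 * CPUs; if some CPU also saw more than MAX_NR_THRESHOLD (4) runnable
 * tasks in the last window, the need becomes 4.
 */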

/* ======================= load based core count ====================== */

static unsigned int apply_limits(const struct cluster_data *cluster,
				 unsigned int need_cpus)
{
	return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
}

static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
{
	return cluster->num_cpus -
				sched_isolate_count(&cluster->cpu_mask, true);
}

static bool is_active(const struct cpu_data *state)
{
	return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
}

static bool adjustment_possible(const struct cluster_data *cluster,
				unsigned int need)
{
	return (need < cluster->active_cpus || (need > cluster->active_cpus &&
						cluster->nr_isolated_cpus));
}

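/*
 * Decide whether the cluster's need_cpus should change. Increases in
 * need take effect immediately, while decreases are applied only after
 * the lower need has persisted for offline_delay_ms, giving the
 * decision hysteresis.
 */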
static bool eval_need(struct cluster_data *cluster)
{
	unsigned long flags;
	struct cpu_data *c;
	unsigned int need_cpus = 0, last_need, thres_idx;
	int ret = 0;
	bool need_flag = false;
	unsigned int new_need;
	s64 now, elapsed;

	if (unlikely(!cluster->inited))
		return 0;

	spin_lock_irqsave(&state_lock, flags);

	if (cluster->boost || !cluster->enable) {
		need_cpus = cluster->max_cpus;
	} else {
		cluster->active_cpus = get_active_cpu_count(cluster);
		thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
		list_for_each_entry(c, &cluster->lru, sib) {
			bool old_is_busy = c->is_busy;

			if (c->busy >= cluster->busy_up_thres[thres_idx])
				c->is_busy = true;
			else if (c->busy < cluster->busy_down_thres[thres_idx])
				c->is_busy = false;

			trace_core_ctl_set_busy(c->cpu, c->busy, old_is_busy,
						c->is_busy);
			need_cpus += c->is_busy;
		}
		need_cpus = apply_task_need(cluster, need_cpus);
	}
	new_need = apply_limits(cluster, need_cpus);
	need_flag = adjustment_possible(cluster, new_need);

	last_need = cluster->need_cpus;
	now = ktime_to_ms(ktime_get());

	if (new_need > cluster->active_cpus) {
		ret = 1;
	} else {
		if (new_need == last_need) {
			cluster->need_ts = now;
			spin_unlock_irqrestore(&state_lock, flags);
			return 0;
		}

		elapsed = now - cluster->need_ts;
		ret = elapsed >= cluster->offline_delay_ms;
	}

	if (ret) {
		cluster->need_ts = now;
		cluster->need_cpus = new_need;
	}
	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
				 ret && need_flag);
	spin_unlock_irqrestore(&state_lock, flags);

	return ret && need_flag;
}

static void apply_need(struct cluster_data *cluster)
{
	if (eval_need(cluster))
		wake_up_core_ctl_thread(cluster);
}

/* ========================= core count enforcement ==================== */

static void wake_up_core_ctl_thread(struct cluster_data *cluster)
{
	unsigned long flags;

	spin_lock_irqsave(&cluster->pending_lock, flags);
	cluster->pending = true;
	spin_unlock_irqrestore(&cluster->pending_lock, flags);
	wake_up_process(cluster->core_ctl_thread);
}

static u64 core_ctl_check_timestamp;

int core_ctl_set_boost(bool boost)
{
	unsigned int index = 0;
	struct cluster_data *cluster;
	unsigned long flags;
	int ret = 0;
	bool boost_state_changed = false;

	if (unlikely(!initialized))
		return 0;

	spin_lock_irqsave(&state_lock, flags);
	for_each_cluster(cluster, index) {
		if (boost) {
			boost_state_changed = !cluster->boost;
			++cluster->boost;
		} else {
			if (!cluster->boost) {
				pr_err("Error turning off boost. Boost already turned off\n");
				ret = -EINVAL;
				break;
			} else {
				--cluster->boost;
				boost_state_changed = !cluster->boost;
			}
		}
	}
	spin_unlock_irqrestore(&state_lock, flags);

	if (boost_state_changed) {
		index = 0;
		for_each_cluster(cluster, index)
			apply_need(cluster);
	}

	trace_core_ctl_set_boost(cluster->boost, ret);

	return ret;
}
EXPORT_SYMBOL(core_ctl_set_boost);
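
/*
 * Usage sketch (callers live elsewhere, e.g. scheduler boost paths):
 * core_ctl_set_boost(true) makes every cluster demand its max_cpus
 * until a matching core_ctl_set_boost(false) drops the per-cluster
 * boost count back to zero. Calls nest, so each boost-on must be
 * paired with exactly one boost-off.
 */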

void core_ctl_check(u64 window_start)
{
	int cpu;
	struct cpu_data *c;
	struct cluster_data *cluster;
	unsigned int index = 0;

	if (unlikely(!initialized))
		return;

	if (window_start == core_ctl_check_timestamp)
		return;

	core_ctl_check_timestamp = window_start;

	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;

		if (!cluster || !cluster->inited)
			continue;

		c->busy = sched_get_cpu_util(cpu);
	}

	update_running_avg();

	for_each_cluster(cluster, index) {
		if (eval_need(cluster))
			wake_up_core_ctl_thread(cluster);
	}
}

static void move_cpu_lru(struct cpu_data *cpu_data)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_del(&cpu_data->sib);
	list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
	spin_unlock_irqrestore(&state_lock, flags);
}

#ifdef CONFIG_SCHED_CORE_ROTATE
static void cpuset_next(struct cluster_data *cluster)
{
	int cpus_needed = cluster->num_cpus - cluster->min_cpus;

	cluster->set_cur++;
	cluster->set_cur = min(cluster->set_cur, cluster->set_max);

	/*
	 * Advance set_cur (wrapping at set_max) until its bit weight
	 * equals cpus_needed, i.e. until it selects exactly the number
	 * of CPUs that may be isolated.
	 */
	while (1) {
		if (bitmap_weight(&cluster->set_cur, BITS_PER_LONG) ==
		    cpus_needed) {
			break;
		}
		cluster->set_cur++;
		cluster->set_cur = min(cluster->set_cur, cluster->set_max);
		if (cluster->set_cur == cluster->set_max)
			/* roll over */
			cluster->set_cur = 0;
	}

	pr_debug("first_cpu=%d cpus_needed=%d set_cur=0x%lx\n",
		 cluster->first_cpu, cpus_needed, cluster->set_cur);
}
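
/*
 * Rotation example (illustrative): for a 4-CPU cluster with
 * min_cpus = 2, cpus_needed = 2, so cpuset_next() advances set_cur
 * through values of bit weight 2 (0x3, 0x5, 0x6, ...), wrapping at
 * set_max. Each resume or min/max_cpus update therefore changes which
 * two CPUs are eligible for isolation, spreading isolation time
 * across the cluster.
 */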

static bool should_we_isolate(int cpu, struct cluster_data *cluster)
{
	/* cpu should be part of cluster */
	return !!(cluster->set_cur & (1 << (cpu - cluster->first_cpu)));
}

static void core_ctl_resume(void)
{
	unsigned int i = 0;
	struct cluster_data *cluster;

	/* move to next isolation cpu set */
	for_each_cluster(cluster, i)
		cpuset_next(cluster);
}

static struct syscore_ops core_ctl_syscore_ops = {
	.resume = core_ctl_resume,
};

#else

static void cpuset_next(struct cluster_data *cluster) { }

static bool should_we_isolate(int cpu, struct cluster_data *cluster)
{
	return true;
}

#endif

static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_isolated = 0;

	/*
	 * Protect against entry being removed (and added at tail) by other
	 * thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus == need)
			break;
		/* Don't offline busy CPUs. */
		if (c->is_busy)
			continue;

		if (!should_we_isolate(c->cpu, cluster))
			continue;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * If the number of active CPUs is within the limits, then
	 * don't force isolation of any busy CPUs.
	 */
	if (cluster->active_cpus <= cluster->max_cpus)
		return;

	nr_isolated = 0;
	num_cpus = cluster->num_cpus;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus <= cluster->max_cpus)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);
}

static void __try_to_unisolate(struct cluster_data *cluster,
			       unsigned int need, bool force)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_unisolated = 0;

	/*
	 * Protect against entry being removed (and added at tail) by other
	 * thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!c->isolated_by_us)
			continue;
		if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
			(!force && c->not_preferred))
			continue;
		if (cluster->active_cpus == need)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to unisolate CPU%u\n", c->cpu);
		if (!sched_unisolate_cpu(c->cpu)) {
			c->isolated_by_us = false;
			move_cpu_lru(c);
			nr_unisolated++;
		} else {
			pr_debug("Unable to unisolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus -= nr_unisolated;
	spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
{
	bool force_use_non_preferred = false;

	__try_to_unisolate(cluster, need, force_use_non_preferred);

	if (cluster->active_cpus == need)
		return;

	force_use_non_preferred = true;
	__try_to_unisolate(cluster, need, force_use_non_preferred);
}

static void __ref do_core_ctl(struct cluster_data *cluster)
{
	unsigned int need;

	need = apply_limits(cluster, cluster->need_cpus);

	if (adjustment_possible(cluster, need)) {
		pr_debug("Trying to adjust group %u from %u to %u\n",
			 cluster->first_cpu, cluster->active_cpus, need);

		if (cluster->active_cpus > need)
			try_to_isolate(cluster, need);
		else if (cluster->active_cpus < need)
			try_to_unisolate(cluster, need);
	}
}

static int __ref try_core_ctl(void *data)
{
	struct cluster_data *cluster = data;
	unsigned long flags;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&cluster->pending_lock, flags);
		if (!cluster->pending) {
			spin_unlock_irqrestore(&cluster->pending_lock, flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&cluster->pending_lock, flags);
		}
		set_current_state(TASK_RUNNING);
		cluster->pending = false;
		spin_unlock_irqrestore(&cluster->pending_lock, flags);

		do_core_ctl(cluster);
	}

	return 0;
}

static int isolation_cpuhp_state(unsigned int cpu, bool online)
{
	struct cpu_data *state = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = state->cluster;
	unsigned int need;
	bool do_wakeup = false, unisolated = false;
	unsigned long flags;

	if (unlikely(!cluster || !cluster->inited))
		return 0;

	if (online) {
		cluster->active_cpus = get_active_cpu_count(cluster);

		/*
		 * Moving to the end of the list should only happen in
		 * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an
		 * infinite list traversal when thermal (or other entities)
		 * reject trying to online CPUs.
		 */
		move_cpu_lru(state);
	} else {
		/*
		 * We don't want to have a CPU both offline and isolated.
		 * So unisolate a CPU that went down if it was isolated by us.
		 */
		if (state->isolated_by_us) {
			sched_unisolate_cpu_unlocked(cpu);
			state->isolated_by_us = false;
			unisolated = true;
		}

		/* Move a CPU to the end of the LRU when it goes offline. */
		move_cpu_lru(state);

		state->busy = 0;
		cluster->active_cpus = get_active_cpu_count(cluster);
	}

	need = apply_limits(cluster, cluster->need_cpus);
	spin_lock_irqsave(&state_lock, flags);
	if (unisolated)
		cluster->nr_isolated_cpus--;
	do_wakeup = adjustment_possible(cluster, need);
	spin_unlock_irqrestore(&state_lock, flags);
	if (do_wakeup)
		wake_up_core_ctl_thread(cluster);

	return 0;
}

static int core_ctl_isolation_online_cpu(unsigned int cpu)
{
	return isolation_cpuhp_state(cpu, true);
}

static int core_ctl_isolation_dead_cpu(unsigned int cpu)
{
	return isolation_cpuhp_state(cpu, false);
}

/* ============================ init code ============================== */

static cpumask_var_t core_ctl_disable_cpumask;
static bool core_ctl_disable_cpumask_present;

static int __init core_ctl_disable_setup(char *str)
{
	if (!*str)
		return -EINVAL;

	alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);

	if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
		free_bootmem_cpumask_var(core_ctl_disable_cpumask);
		return -EINVAL;
	}

	core_ctl_disable_cpumask_present = true;
	pr_info("disable_cpumask=%*pbl\n",
		cpumask_pr_args(core_ctl_disable_cpumask));

	return 0;
}
early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);
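
/*
 * Example (illustrative): booting with the kernel command line
 * argument "core_ctl_disable_cpumask=4-7" disables core_ctl for any
 * cluster whose CPUs are all within CPUs 4-7, e.g. a big cluster made
 * up of exactly those CPUs.
 */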

static bool should_skip(const struct cpumask *mask)
{
	if (!core_ctl_disable_cpumask_present)
		return false;

	/*
	 * We operate on a cluster basis. Disable core_ctl for a
	 * cluster if all of its cpus are specified in
	 * core_ctl_disable_cpumask.
	 */
	return cpumask_subset(mask, core_ctl_disable_cpumask);
}

static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
	unsigned int i;

	for (i = 0; i < num_clusters; ++i) {
		if (cluster_state[i].first_cpu == first_cpu)
			return &cluster_state[i];
	}

	return NULL;
}

static int cluster_init(const struct cpumask *mask)
{
	struct device *dev;
	unsigned int first_cpu = cpumask_first(mask);
	struct cluster_data *cluster;
	struct cpu_data *state;
	unsigned int cpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	if (should_skip(mask))
		return 0;

	if (find_cluster_by_first_cpu(first_cpu))
		return 0;

	dev = get_cpu_device(first_cpu);
	if (!dev)
		return -ENODEV;

	pr_info("Creating CPU group %d\n", first_cpu);

	if (num_clusters == MAX_CLUSTERS) {
		pr_err("Unsupported number of clusters. Only %u supported\n",
		       MAX_CLUSTERS);
		return -EINVAL;
	}
	cluster = &cluster_state[num_clusters];
	++num_clusters;

	cpumask_copy(&cluster->cpu_mask, mask);
	cluster->num_cpus = cpumask_weight(mask);
	if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
		pr_err("HW configuration not supported\n");
		return -EINVAL;
	}
	cluster->first_cpu = first_cpu;
	cluster->min_cpus = 1;
	cluster->max_cpus = cluster->num_cpus;
	cluster->need_cpus = cluster->num_cpus;
	cluster->offline_delay_ms = 100;
	cluster->task_thres = UINT_MAX;
	cluster->nrrun = cluster->num_cpus;
#ifdef CONFIG_SCHED_CORE_ROTATE
	cluster->set_max = cluster->num_cpus * cluster->num_cpus;
	/* by default mark all cpus as eligible */
	cluster->set_cur = cluster->set_max - 1;
#endif
	cluster->enable = true;
	INIT_LIST_HEAD(&cluster->lru);
	spin_lock_init(&cluster->pending_lock);

	for_each_cpu(cpu, mask) {
		pr_info("Init CPU%u state\n", cpu);

		state = &per_cpu(cpu_state, cpu);
		state->cluster = cluster;
		state->cpu = cpu;
		list_add_tail(&state->sib, &cluster->lru);
	}
	cluster->active_cpus = get_active_cpu_count(cluster);

	cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
					"core_ctl/%d", first_cpu);
	if (IS_ERR(cluster->core_ctl_thread))
		return PTR_ERR(cluster->core_ctl_thread);

	sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
				   &param);

	cluster->inited = true;

	kobject_init(&cluster->kobj, &ktype_core_ctl);
	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}

static int __init core_ctl_init(void)
{
	unsigned int cpu;
	struct cpumask cpus = *cpu_possible_mask;

	if (should_skip(cpu_possible_mask))
		return 0;

#ifdef CONFIG_SCHED_CORE_ROTATE
	register_syscore_ops(&core_ctl_syscore_ops);
#endif

	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
			"core_ctl/isolation:online",
			core_ctl_isolation_online_cpu, NULL);

	cpuhp_setup_state_nocalls(CPUHP_CORE_CTL_ISOLATION_DEAD,
			"core_ctl/isolation:dead",
			NULL, core_ctl_isolation_dead_cpu);

	for_each_cpu(cpu, &cpus) {
		int ret;
		const struct cpumask *cluster_cpus = cpu_coregroup_mask(cpu);

		ret = cluster_init(cluster_cpus);
		if (ret)
			pr_warn("unable to create core ctl group: %d\n", ret);
		cpumask_andnot(&cpus, &cpus, cluster_cpus);
	}
	initialized = true;
	return 0;
}

late_initcall(core_ctl_init);