/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	"core_ctl: " fmt

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>

#include <trace/events/sched.h>

#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 2
struct cluster_data {
	bool inited;
	unsigned int min_cpus;
	unsigned int max_cpus;
	unsigned int offline_delay_ms;
	unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int active_cpus;
	unsigned int num_cpus;
	unsigned int nr_isolated_cpus;
	cpumask_t cpu_mask;
	unsigned int need_cpus;
	unsigned int task_thres;
	unsigned int max_nr;
	s64 need_ts;
	struct list_head lru;
	bool pending;
	spinlock_t pending_lock;
	bool is_big_cluster;
	bool enable;
	int nrrun;
	struct task_struct *core_ctl_thread;
	unsigned int first_cpu;
	unsigned int boost;
	struct kobject kobj;
};

struct cpu_data {
	bool is_busy;
	unsigned int busy;
	unsigned int cpu;
	bool not_preferred;
	struct cluster_data *cluster;
	struct list_head sib;
	bool isolated_by_us;
};

static DEFINE_PER_CPU(struct cpu_data, cpu_state);
static struct cluster_data cluster_state[MAX_CLUSTERS];
static unsigned int num_clusters;

#define for_each_cluster(cluster, idx) \
	for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
	     (idx)++, (cluster) = &cluster_state[idx])
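
/*
 * Note on for_each_cluster(): callers are expected to start with
 * idx == 0. If the loop runs to completion, "cluster" is left pointing
 * one past the last valid entry, so post-loop dereferences (as in
 * core_ctl_set_boost() below) rely on an earlier "break" having hit.
 */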

static DEFINE_SPINLOCK(state_lock);
static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);

/* ========================= sysfs interface =========================== */

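/*
 * Each cluster exposes these attributes under the cpu device of its
 * first CPU (see cluster_init() below). With sysfs mounted at /sys,
 * that should give paths such as:
 *
 *	/sys/devices/system/cpu/cpu0/core_ctl/min_cpus
 *	/sys/devices/system/cpu/cpu4/core_ctl/not_preferred
 *
 * e.g. "echo 2 > .../core_ctl/min_cpus" keeps at least two CPUs of
 * that cluster unisolated. The cpu4 path assumes a second cluster
 * whose first CPU is CPU4; actual numbering depends on the topology.
 */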
static ssize_t store_min_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->min_cpus = min(val, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
}

static ssize_t store_max_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	val = min(val, state->num_cpus);
	state->max_cpus = val;
	state->min_cpus = min(state->min_cpus, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
}

static ssize_t store_offline_delay_ms(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->offline_delay_ms = val;
	apply_need(state);

	return count;
}

static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
}

static ssize_t store_task_thres(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	if (val < state->num_cpus)
		return -EINVAL;

	state->task_thres = val;
	apply_need(state);

	return count;
}

static ssize_t show_offline_delay_ms(const struct cluster_data *state,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
}

static ssize_t store_busy_up_thres(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
			&val[0], &val[1], &val[2], &val[3],
			&val[4], &val[5]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_up_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_busy_down_thres(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
			&val[0], &val[1], &val[2], &val[3],
			&val[4], &val[5]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_down_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_is_big_cluster(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->is_big_cluster = val ? 1 : 0;
	return count;
}

static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}

static ssize_t store_enable(struct cluster_data *state,
			const char *buf, size_t count)
{
	unsigned int val;
	bool bval;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	bval = !!val;
	if (bval != state->enable) {
		state->enable = bval;
		apply_need(state);
	}

	return count;
}

static ssize_t show_enable(const struct cluster_data *state, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", state->enable);
}

static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
}

static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
}

static ssize_t show_global_state(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	struct cluster_data *cluster;
	ssize_t count = 0;
	unsigned int cpu;

	spin_lock_irq(&state_lock);
	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;
		if (!cluster || !cluster->inited)
			continue;

		count += snprintf(buf + count, PAGE_SIZE - count,
					"CPU%u\n", cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tCPU: %u\n", c->cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tOnline: %u\n",
					cpu_online(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tIsolated: %u\n",
					cpu_isolated(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tFirst CPU: %u\n",
					cluster->first_cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tBusy%%: %u\n", c->busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tIs busy: %u\n", c->is_busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNot preferred: %u\n",
					c->not_preferred);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNr running: %u\n", cluster->nrrun);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tActive CPUs: %u\n", get_active_cpu_count(cluster));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNeed CPUs: %u\n", cluster->need_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNr isolated CPUs: %u\n",
					cluster->nr_isolated_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tBoost: %u\n", (unsigned int) cluster->boost);
	}
	spin_unlock_irq(&state_lock);

	return count;
}

static ssize_t store_not_preferred(struct cluster_data *state,
				   const char *buf, size_t count)
{
	struct cpu_data *c;
	unsigned int i;
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	unsigned long flags;
	int ret;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
			&val[0], &val[1], &val[2], &val[3],
			&val[4], &val[5]);
	if (ret != state->num_cpus)
		return -EINVAL;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		c->not_preferred = val[i];
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		count += scnprintf(buf + count, PAGE_SIZE - count,
				"CPU#%d: %u\n", c->cpu, c->not_preferred);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

struct core_ctl_attr {
	struct attribute attr;
	ssize_t (*show)(const struct cluster_data *, char *);
	ssize_t (*store)(struct cluster_data *, const char *, size_t count);
};

#define core_ctl_attr_ro(_name)		\
static struct core_ctl_attr _name =	\
__ATTR(_name, 0444, show_##_name, NULL)

#define core_ctl_attr_rw(_name)			\
static struct core_ctl_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

core_ctl_attr_rw(min_cpus);
core_ctl_attr_rw(max_cpus);
core_ctl_attr_rw(offline_delay_ms);
core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);
core_ctl_attr_rw(enable);

static struct attribute *default_attrs[] = {
	&min_cpus.attr,
	&max_cpus.attr,
	&offline_delay_ms.attr,
	&busy_up_thres.attr,
	&busy_down_thres.attr,
	&task_thres.attr,
	&is_big_cluster.attr,
	&enable.attr,
	&need_cpus.attr,
	&active_cpus.attr,
	&global_state.attr,
	&not_preferred.attr,
	NULL
};

#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->show)
		ret = cattr->show(data, buf);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->store)
		ret = cattr->store(data, buf, count);

	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_core_ctl = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

/* ==================== runqueue based core count =================== */

static void update_running_avg(void)
{
	int avg, iowait_avg, big_avg;
	int max_nr, big_max_nr;
	struct cluster_data *cluster;
	unsigned int index = 0;

	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
				 &max_nr, &big_max_nr);

	for_each_cluster(cluster, index) {
		if (!cluster->inited)
			continue;
		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
		cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr;
	}
}

#define MAX_NR_THRESHOLD 4
/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
				    unsigned int new_need)
{
	/* unisolate all cores if there are enough tasks */
	if (cluster->nrrun >= cluster->task_thres)
		return cluster->num_cpus;

	/* only unisolate more cores if there are tasks to run */
	if (cluster->nrrun > new_need)
		new_need = new_need + 1;

	/*
	 * We don't want tasks to be overcrowded in a cluster.
	 * If any CPU has more than MAX_NR_THRESHOLD in the last
	 * window, bring another CPU to help out.
	 */
	if (cluster->max_nr > MAX_NR_THRESHOLD)
		new_need = new_need + 1;

	return new_need;
}
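
/*
 * Worked example (illustrative numbers, not from the source): with the
 * default task_thres of UINT_MAX, nrrun = 3, max_nr = 5 and a
 * load-based new_need of 2, apply_task_need() bumps the need once
 * because nrrun (3) exceeds new_need (2), and once more because
 * max_nr (5) exceeds MAX_NR_THRESHOLD (4), returning 4.
 */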

/* ======================= load based core count ====================== */

static unsigned int apply_limits(const struct cluster_data *cluster,
				 unsigned int need_cpus)
{
	return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
}

static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
{
	return cluster->num_cpus -
				sched_isolate_count(&cluster->cpu_mask, true);
}

static bool is_active(const struct cpu_data *state)
{
	return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
}

static bool adjustment_possible(const struct cluster_data *cluster,
							unsigned int need)
{
	return (need < cluster->active_cpus || (need > cluster->active_cpus &&
						cluster->nr_isolated_cpus));
}
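
/*
 * An adjustment is worthwhile in two cases: fewer CPUs are needed than
 * are active (some can be isolated), or more are needed and this
 * cluster actually has CPUs we isolated that can be brought back.
 */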

static bool eval_need(struct cluster_data *cluster)
{
	unsigned long flags;
	struct cpu_data *c;
	unsigned int need_cpus = 0, last_need, thres_idx;
	int ret = 0;
	bool need_flag = false;
	unsigned int new_need;
	s64 now, elapsed;

	if (unlikely(!cluster->inited))
		return 0;

	spin_lock_irqsave(&state_lock, flags);

	if (cluster->boost || !cluster->enable) {
		need_cpus = cluster->max_cpus;
	} else {
		cluster->active_cpus = get_active_cpu_count(cluster);
		thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
		list_for_each_entry(c, &cluster->lru, sib) {
			if (c->busy >= cluster->busy_up_thres[thres_idx])
				c->is_busy = true;
			else if (c->busy < cluster->busy_down_thres[thres_idx])
				c->is_busy = false;
			need_cpus += c->is_busy;
		}
		need_cpus = apply_task_need(cluster, need_cpus);
	}
	new_need = apply_limits(cluster, need_cpus);
	need_flag = adjustment_possible(cluster, new_need);

	last_need = cluster->need_cpus;
	now = ktime_to_ms(ktime_get());

	if (new_need > cluster->active_cpus) {
		ret = 1;
	} else {
		if (new_need == last_need) {
			cluster->need_ts = now;
			spin_unlock_irqrestore(&state_lock, flags);
			return 0;
		}

		elapsed = now - cluster->need_ts;
		ret = elapsed >= cluster->offline_delay_ms;
	}

	if (ret) {
		cluster->need_ts = now;
		cluster->need_cpus = new_need;
	}
	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
				 ret && need_flag);
	spin_unlock_irqrestore(&state_lock, flags);

	return ret && need_flag;
}
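
/*
 * The asymmetry above is what gives the controller its hysteresis: a
 * rise in need (new_need > active_cpus) is acted on immediately, while
 * a drop is only honored once it has persisted for offline_delay_ms,
 * so short dips in load do not bounce CPUs in and out of isolation.
 */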

static void apply_need(struct cluster_data *cluster)
{
	if (eval_need(cluster))
		wake_up_core_ctl_thread(cluster);
}

static void core_ctl_set_busy(struct cpu_data *c, unsigned int busy)
{
	unsigned int old_is_busy = c->is_busy;

	if (c->busy == busy)
		return;

	c->busy = busy;
	trace_core_ctl_set_busy(c->cpu, busy, old_is_busy, c->is_busy);
}

/* ========================= core count enforcement ==================== */

static void wake_up_core_ctl_thread(struct cluster_data *cluster)
{
	unsigned long flags;

	spin_lock_irqsave(&cluster->pending_lock, flags);
	cluster->pending = true;
	spin_unlock_irqrestore(&cluster->pending_lock, flags);
	wake_up_process(cluster->core_ctl_thread);
}

static u64 core_ctl_check_timestamp;

int core_ctl_set_boost(bool boost)
{
	unsigned int index = 0;
	struct cluster_data *cluster;
	unsigned long flags;
	int ret = 0;
	bool boost_state_changed = false;

	if (unlikely(!initialized))
		return 0;

	spin_lock_irqsave(&state_lock, flags);
	for_each_cluster(cluster, index) {
		if (cluster->is_big_cluster) {
			if (boost) {
				boost_state_changed = !cluster->boost;
				++cluster->boost;
			} else {
				if (!cluster->boost) {
					pr_err("Error turning off boost. Boost already turned off\n");
					ret = -EINVAL;
				} else {
					--cluster->boost;
					boost_state_changed = !cluster->boost;
				}
			}
			break;
		}
	}
	spin_unlock_irqrestore(&state_lock, flags);

	if (boost_state_changed)
		apply_need(cluster);

	trace_core_ctl_set_boost(cluster->boost, ret);

	return ret;
}
EXPORT_SYMBOL(core_ctl_set_boost);
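
/*
 * cluster->boost is a counter rather than a flag, so boost requests
 * from independent callers nest: only the first set and the last clear
 * actually change the evaluated need (eval_need() pins need_cpus at
 * max_cpus while boost is non-zero). Boost applies only to the big
 * cluster in this scheme.
 */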

void core_ctl_check(u64 window_start)
{
	int cpu;
	unsigned int busy;
	struct cpu_data *c;
	struct cluster_data *cluster;
	unsigned int index = 0;

	if (unlikely(!initialized))
		return;

	if (window_start == core_ctl_check_timestamp)
		return;

	core_ctl_check_timestamp = window_start;

	for_each_possible_cpu(cpu) {

		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;

		if (!cluster || !cluster->inited)
			continue;

		busy = sched_get_cpu_util(cpu);
		core_ctl_set_busy(c, busy);
	}

	update_running_avg();

	for_each_cluster(cluster, index) {
		if (eval_need(cluster))
			wake_up_core_ctl_thread(cluster);
	}
}
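
/*
 * core_ctl_check() is the periodic entry point; it is presumably
 * driven from the scheduler's window-based load tracking, once per
 * window rollover. The timestamp check keeps multiple CPUs that report
 * the same window_start from re-running the evaluation.
 */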

static void move_cpu_lru(struct cpu_data *cpu_data)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_del(&cpu_data->sib);
	list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
	spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_isolated = 0;

	/*
	 * Protect against entry being removed (and added at tail) by other
	 * thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus == need)
			break;
		/* Don't offline busy CPUs. */
		if (c->is_busy)
			continue;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * If the number of active CPUs is within the limits, then
	 * don't force isolation of any busy CPUs.
	 */
	if (cluster->active_cpus <= cluster->max_cpus)
		return;

	nr_isolated = 0;
	num_cpus = cluster->num_cpus;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus <= cluster->max_cpus)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);
}

static void __try_to_unisolate(struct cluster_data *cluster,
			       unsigned int need, bool force)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_unisolated = 0;

	/*
	 * Protect against entry being removed (and added at tail) by other
	 * thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!c->isolated_by_us)
			continue;
		if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
			(!force && c->not_preferred))
			continue;
		if (cluster->active_cpus == need)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to unisolate CPU%u\n", c->cpu);
		if (!sched_unisolate_cpu(c->cpu)) {
			c->isolated_by_us = false;
			move_cpu_lru(c);
			nr_unisolated++;
		} else {
			pr_debug("Unable to unisolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus -= nr_unisolated;
	spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
{
	bool force_use_non_preferred = false;

	__try_to_unisolate(cluster, need, force_use_non_preferred);

	if (cluster->active_cpus == need)
		return;

	force_use_non_preferred = true;
	__try_to_unisolate(cluster, need, force_use_non_preferred);
}
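
/*
 * Preferred CPUs are brought back first; CPUs marked not_preferred via
 * sysfs are used only in the second, forced pass, when the first pass
 * could not reach the needed count.
 */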

static void __ref do_core_ctl(struct cluster_data *cluster)
{
	unsigned int need;

	need = apply_limits(cluster, cluster->need_cpus);

	if (adjustment_possible(cluster, need)) {
		pr_debug("Trying to adjust group %u from %u to %u\n",
				cluster->first_cpu, cluster->active_cpus, need);

		if (cluster->active_cpus > need)
			try_to_isolate(cluster, need);
		else if (cluster->active_cpus < need)
			try_to_unisolate(cluster, need);
	}
}

static int __ref try_core_ctl(void *data)
{
	struct cluster_data *cluster = data;
	unsigned long flags;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&cluster->pending_lock, flags);
		if (!cluster->pending) {
			spin_unlock_irqrestore(&cluster->pending_lock, flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&cluster->pending_lock, flags);
		}
		set_current_state(TASK_RUNNING);
		cluster->pending = false;
		spin_unlock_irqrestore(&cluster->pending_lock, flags);

		do_core_ctl(cluster);
	}

	return 0;
}

static int __ref cpu_callback(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	uint32_t cpu = (uintptr_t)hcpu;
	struct cpu_data *state = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = state->cluster;
	unsigned int need;
	bool do_wakeup, unisolated = false;
	unsigned long flags;

	if (unlikely(!cluster || !cluster->inited))
		return NOTIFY_DONE;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		cluster->active_cpus = get_active_cpu_count(cluster);

		/*
		 * Moving to the end of the list should only happen in
		 * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an
		 * infinite list traversal when thermal (or other entities)
		 * reject trying to online CPUs.
		 */
		move_cpu_lru(state);
		break;

	case CPU_DEAD:
		/*
		 * We don't want to have a CPU both offline and isolated.
		 * So unisolate a CPU that went down if it was isolated by us.
		 */
		if (state->isolated_by_us) {
			sched_unisolate_cpu_unlocked(cpu);
			state->isolated_by_us = false;
			unisolated = true;
		}

		/* Move a CPU to the end of the LRU when it goes offline. */
		move_cpu_lru(state);

		state->busy = 0;
		cluster->active_cpus = get_active_cpu_count(cluster);
		break;
	default:
		return NOTIFY_DONE;
	}

	need = apply_limits(cluster, cluster->need_cpus);
	spin_lock_irqsave(&state_lock, flags);
	if (unisolated)
		cluster->nr_isolated_cpus--;
	do_wakeup = adjustment_possible(cluster, need);
	spin_unlock_irqrestore(&state_lock, flags);
	if (do_wakeup)
		wake_up_core_ctl_thread(cluster);

	return NOTIFY_OK;
}

static struct notifier_block __refdata cpu_notifier = {
	.notifier_call = cpu_callback,
};

/* ============================ init code ============================== */

static cpumask_var_t core_ctl_disable_cpumask;
static bool core_ctl_disable_cpumask_present;

static int __init core_ctl_disable_setup(char *str)
{
	if (!*str)
		return -EINVAL;

	alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);

	if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
		free_bootmem_cpumask_var(core_ctl_disable_cpumask);
		return -EINVAL;
	}

	core_ctl_disable_cpumask_present = true;
	pr_info("disable_cpumask=%*pbl\n",
			cpumask_pr_args(core_ctl_disable_cpumask));

	return 0;
}
early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);
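
/*
 * Example (hypothetical topology): booting with
 * "core_ctl_disable_cpumask=4-7" on the kernel command line parses a
 * cpulist covering CPUs 4-7; should_skip() below then disables
 * core_ctl for any cluster whose CPUs all fall inside that mask.
 */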

static bool should_skip(const struct cpumask *mask)
{
	if (!core_ctl_disable_cpumask_present)
		return false;

	/*
	 * We operate on a cluster basis. Disable the core_ctl for
	 * a cluster, if all of its cpus are specified in
	 * core_ctl_disable_cpumask
	 */
	return cpumask_subset(mask, core_ctl_disable_cpumask);
}

static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
	unsigned int i;

	for (i = 0; i < num_clusters; ++i) {
		if (cluster_state[i].first_cpu == first_cpu)
			return &cluster_state[i];
	}

	return NULL;
}

static int cluster_init(const struct cpumask *mask)
{
	struct device *dev;
	unsigned int first_cpu = cpumask_first(mask);
	struct cluster_data *cluster;
	struct cpu_data *state;
	unsigned int cpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	if (should_skip(mask))
		return 0;

	if (find_cluster_by_first_cpu(first_cpu))
		return 0;

	dev = get_cpu_device(first_cpu);
	if (!dev)
		return -ENODEV;

	pr_info("Creating CPU group %d\n", first_cpu);

	if (num_clusters == MAX_CLUSTERS) {
		pr_err("Unsupported number of clusters. Only %u supported\n",
				MAX_CLUSTERS);
		return -EINVAL;
	}
	cluster = &cluster_state[num_clusters];
	++num_clusters;

	cpumask_copy(&cluster->cpu_mask, mask);
	cluster->num_cpus = cpumask_weight(mask);
	if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
		pr_err("HW configuration not supported\n");
		return -EINVAL;
	}
	cluster->first_cpu = first_cpu;
	cluster->min_cpus = 1;
	cluster->max_cpus = cluster->num_cpus;
	cluster->need_cpus = cluster->num_cpus;
	cluster->offline_delay_ms = 100;
	cluster->task_thres = UINT_MAX;
	cluster->nrrun = cluster->num_cpus;
	cluster->enable = true;
	INIT_LIST_HEAD(&cluster->lru);
	spin_lock_init(&cluster->pending_lock);

	for_each_cpu(cpu, mask) {
		pr_info("Init CPU%u state\n", cpu);

		state = &per_cpu(cpu_state, cpu);
		state->cluster = cluster;
		state->cpu = cpu;
		list_add_tail(&state->sib, &cluster->lru);
	}
	cluster->active_cpus = get_active_cpu_count(cluster);

	cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
					"core_ctl/%d", first_cpu);
	if (IS_ERR(cluster->core_ctl_thread))
		return PTR_ERR(cluster->core_ctl_thread);

	sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
				   &param);

	cluster->inited = true;

	kobject_init(&cluster->kobj, &ktype_core_ctl);
	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}
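
/*
 * The enforcement thread runs as SCHED_FIFO at the highest RT priority
 * (MAX_RT_PRIO-1) so that isolation requests are not starved by the
 * very load that triggered them; there is one "core_ctl/N" thread per
 * cluster, named after the cluster's first CPU.
 */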

static int __init core_ctl_init(void)
{
	unsigned int cpu;
	struct cpumask cpus = *cpu_possible_mask;

	if (should_skip(cpu_possible_mask))
		return 0;

	register_cpu_notifier(&cpu_notifier);

	for_each_cpu(cpu, &cpus) {
		int ret;
		const struct cpumask *cluster_cpus = cpu_coregroup_mask(cpu);

		ret = cluster_init(cluster_cpus);
		if (ret)
			pr_warn("unable to create core ctl group: %d\n", ret);
		cpumask_andnot(&cpus, &cpus, cluster_cpus);
	}
	initialized = true;
	return 0;
}

late_initcall(core_ctl_init);