/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "core_ctl: " fmt

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>

#include <trace/events/sched.h>

#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 2

struct cluster_data {
	bool inited;
	unsigned int min_cpus;
	unsigned int max_cpus;
	unsigned int offline_delay_ms;
	unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int active_cpus;
	unsigned int num_cpus;
	unsigned int nr_isolated_cpus;
	cpumask_t cpu_mask;
	unsigned int need_cpus;
	unsigned int task_thres;
	s64 need_ts;
	struct list_head lru;
	bool pending;
	spinlock_t pending_lock;
	bool is_big_cluster;
	int nrrun;
	struct task_struct *core_ctl_thread;
	unsigned int first_cpu;
	unsigned int boost;
	struct kobject kobj;
};

struct cpu_data {
	bool is_busy;
	unsigned int busy;
	unsigned int cpu;
	bool not_preferred;
	struct cluster_data *cluster;
	struct list_head sib;
	bool isolated_by_us;
};

static DEFINE_PER_CPU(struct cpu_data, cpu_state);
static struct cluster_data cluster_state[MAX_CLUSTERS];
static unsigned int num_clusters;

#define for_each_cluster(cluster, idx) \
	for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
		(idx)++, (cluster) = &cluster_state[idx])

static DEFINE_SPINLOCK(state_lock);
static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);

/* ========================= sysfs interface =========================== */

static ssize_t store_min_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->min_cpus = min(val, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
}

static ssize_t store_max_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	val = min(val, state->num_cpus);
	state->max_cpus = val;
	state->min_cpus = min(state->min_cpus, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
}

static ssize_t store_offline_delay_ms(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->offline_delay_ms = val;
	apply_need(state);

	return count;
}

static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
}

static ssize_t store_task_thres(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	if (val < state->num_cpus)
		return -EINVAL;

	state->task_thres = val;
	apply_need(state);

	return count;
}

static ssize_t show_offline_delay_ms(const struct cluster_data *state,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
}

static ssize_t store_busy_up_thres(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
			&val[0], &val[1], &val[2], &val[3],
			&val[4], &val[5]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_up_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_busy_down_thres(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
			&val[0], &val[1], &val[2], &val[3],
			&val[4], &val[5]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_down_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_is_big_cluster(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->is_big_cluster = val ? 1 : 0;
	return count;
}

static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}

static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
}

static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
}

static ssize_t show_global_state(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	struct cluster_data *cluster;
	ssize_t count = 0;
	unsigned int cpu;

	spin_lock_irq(&state_lock);
	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;
		if (!cluster || !cluster->inited)
			continue;

		count += snprintf(buf + count, PAGE_SIZE - count,
					"CPU%u\n", cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tCPU: %u\n", c->cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tOnline: %u\n",
					cpu_online(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tIsolated: %u\n",
					cpu_isolated(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tFirst CPU: %u\n",
					cluster->first_cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tBusy%%: %u\n", c->busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tIs busy: %u\n", c->is_busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNot preferred: %u\n",
					c->not_preferred);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNr running: %u\n", cluster->nrrun);
		count += snprintf(buf + count, PAGE_SIZE - count,
			"\tActive CPUs: %u\n", get_active_cpu_count(cluster));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNeed CPUs: %u\n", cluster->need_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNr isolated CPUs: %u\n",
					cluster->nr_isolated_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tBoost: %u\n", (unsigned int) cluster->boost);
	}
	spin_unlock_irq(&state_lock);

	return count;
}

static ssize_t store_not_preferred(struct cluster_data *state,
				   const char *buf, size_t count)
{
	struct cpu_data *c;
	unsigned int i;
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	unsigned long flags;
	int ret;

	ret = sscanf(buf, "%u %u %u %u %u %u\n",
			&val[0], &val[1], &val[2], &val[3],
			&val[4], &val[5]);
	if (ret != state->num_cpus)
		return -EINVAL;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		c->not_preferred = val[i];
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		count += scnprintf(buf + count, PAGE_SIZE - count,
				"CPU#%d: %u\n", c->cpu, c->not_preferred);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

struct core_ctl_attr {
	struct attribute attr;
	ssize_t (*show)(const struct cluster_data *, char *);
	ssize_t (*store)(struct cluster_data *, const char *, size_t count);
};

#define core_ctl_attr_ro(_name)		\
static struct core_ctl_attr _name =	\
__ATTR(_name, 0444, show_##_name, NULL)

#define core_ctl_attr_rw(_name)			\
static struct core_ctl_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

core_ctl_attr_rw(min_cpus);
core_ctl_attr_rw(max_cpus);
core_ctl_attr_rw(offline_delay_ms);
core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);

static struct attribute *default_attrs[] = {
	&min_cpus.attr,
	&max_cpus.attr,
	&offline_delay_ms.attr,
	&busy_up_thres.attr,
	&busy_down_thres.attr,
	&task_thres.attr,
	&is_big_cluster.attr,
	&need_cpus.attr,
	&active_cpus.attr,
	&global_state.attr,
	&not_preferred.attr,
	NULL
};

#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->show)
		ret = cattr->show(data, buf);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->store)
		ret = cattr->store(data, buf, count);

	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_core_ctl = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
};

/* ==================== runqueue based core count =================== */

#define NR_RUNNING_TOLERANCE 5

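/*
 * Pull the scheduler's average nr_running statistics and refresh each
 * cluster's nrrun: big clusters track the big-task average, the others
 * use the overall average (see the comment in the loop below).
 */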
static void update_running_avg(void)
{
	int avg, iowait_avg, big_avg;
	struct cluster_data *cluster;
	unsigned int index = 0;

	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);

	/*
	 * Round up to the next integer if the average number of running
	 * tasks is within NR_RUNNING_TOLERANCE/100 of the next integer.
	 * If normal rounding up were used, a transient task could trigger
	 * an online event, and by the time the core is onlined the task
	 * has already finished.
	 * Rounding to the closest integer suffers the same problem because
	 * the scheduler might only provide running stats once per jiffy,
	 * and a transient task could skew the number for one jiffy. If
	 * core control samples every 2 jiffies, it will observe an extra
	 * 0.5 in the running average, which rounds up to 1 task.
	 */
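	/*
	 * For example, assuming sched_get_nr_running_avg() reports averages
	 * scaled by 100 (as the division below implies): an average of 1.96
	 * tasks arrives as 196 and becomes (196 + 5) / 100 = 2, while 1.94
	 * arrives as 194 and becomes 1.
	 */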
	avg = (avg + NR_RUNNING_TOLERANCE) / 100;
	big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;

	for_each_cluster(cluster, index) {
		if (!cluster->inited)
			continue;
		/*
		 * The big cluster only needs to take care of big tasks, but
		 * if there are not enough big cores, big tasks must run on
		 * the little cluster as well. Thus the little cluster's
		 * runqueue stat has to use the overall runqueue average, or
		 * derive how many big tasks would have to run on little.
		 * The latter is hard to do given that core control reacts
		 * much more slowly than the scheduler and cannot predict
		 * its behavior.
		 */
		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
	}
}

/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
				    unsigned int new_need)
{
	/* unisolate all cores if there are enough tasks */
	if (cluster->nrrun >= cluster->task_thres)
		return cluster->num_cpus;

	/* only unisolate more cores if there are tasks to run */
	if (cluster->nrrun > new_need)
		return new_need + 1;

	return new_need;
}

/* ======================= load based core count ====================== */

static unsigned int apply_limits(const struct cluster_data *cluster,
				 unsigned int need_cpus)
{
	return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
}

static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
{
	return cluster->num_cpus -
				sched_isolate_count(&cluster->cpu_mask, true);
}

static bool is_active(const struct cpu_data *state)
{
	return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
}

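/*
 * An adjustment is possible only if fewer CPUs are needed than are
 * currently active, or more are needed and at least one CPU isolated by
 * core_ctl can be brought back.
 */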
static bool adjustment_possible(const struct cluster_data *cluster,
				unsigned int need)
{
	return (need < cluster->active_cpus || (need > cluster->active_cpus &&
						cluster->nr_isolated_cpus));
}

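/*
 * Compute how many CPUs this cluster needs. A boosted cluster needs
 * max_cpus; otherwise each CPU's busy percentage is run through the
 * up/down thresholds (indexed by the current active count) and the sum
 * is adjusted by the runqueue statistics, then clamped to
 * [min_cpus, max_cpus]. An increase above the active count is committed
 * immediately, while a decrease is committed only after offline_delay_ms
 * has elapsed. Returns true if the per-cluster thread should be woken.
 */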
static bool eval_need(struct cluster_data *cluster)
{
	unsigned long flags;
	struct cpu_data *c;
	unsigned int need_cpus = 0, last_need, thres_idx;
	int ret = 0;
	bool need_flag = false;
	unsigned int new_need;
	s64 now, elapsed;

	if (unlikely(!cluster->inited))
		return 0;

	spin_lock_irqsave(&state_lock, flags);

	if (cluster->boost) {
		need_cpus = cluster->max_cpus;
	} else {
		cluster->active_cpus = get_active_cpu_count(cluster);
		thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
		list_for_each_entry(c, &cluster->lru, sib) {
			if (c->busy >= cluster->busy_up_thres[thres_idx])
				c->is_busy = true;
			else if (c->busy < cluster->busy_down_thres[thres_idx])
				c->is_busy = false;
			need_cpus += c->is_busy;
		}
		need_cpus = apply_task_need(cluster, need_cpus);
	}
	new_need = apply_limits(cluster, need_cpus);
	need_flag = adjustment_possible(cluster, new_need);

	last_need = cluster->need_cpus;
	now = ktime_to_ms(ktime_get());

	if (new_need > cluster->active_cpus) {
		ret = 1;
	} else {
		if (new_need == last_need) {
			cluster->need_ts = now;
			spin_unlock_irqrestore(&state_lock, flags);
			return 0;
		}

		elapsed = now - cluster->need_ts;
		ret = elapsed >= cluster->offline_delay_ms;
	}

	if (ret) {
		cluster->need_ts = now;
		cluster->need_cpus = new_need;
	}
	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
				 ret && need_flag);
	spin_unlock_irqrestore(&state_lock, flags);

	return ret && need_flag;
}

static void apply_need(struct cluster_data *cluster)
{
	if (eval_need(cluster))
		wake_up_core_ctl_thread(cluster);
}

static void core_ctl_set_busy(struct cpu_data *c, unsigned int busy)
{
	unsigned int old_is_busy = c->is_busy;

	if (c->busy == busy)
		return;

	c->busy = busy;
	trace_core_ctl_set_busy(c->cpu, busy, old_is_busy, c->is_busy);
}

/* ========================= core count enforcement ==================== */

static void wake_up_core_ctl_thread(struct cluster_data *cluster)
{
	unsigned long flags;

	spin_lock_irqsave(&cluster->pending_lock, flags);
	cluster->pending = true;
	spin_unlock_irqrestore(&cluster->pending_lock, flags);

	wake_up_process_no_notif(cluster->core_ctl_thread);
}

static u64 core_ctl_check_timestamp;

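/*
 * Boost is reference counted and applies to the big cluster only. When
 * the boost state toggles between zero and non-zero, the cluster's need
 * is re-evaluated (a boosted cluster needs all of its max_cpus).
 */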
int core_ctl_set_boost(bool boost)
{
	unsigned int index = 0;
	struct cluster_data *cluster;
	unsigned long flags;
	int ret = 0;
	bool boost_state_changed = false;

	if (unlikely(!initialized))
		return 0;

	spin_lock_irqsave(&state_lock, flags);
	for_each_cluster(cluster, index) {
		if (cluster->is_big_cluster) {
			if (boost) {
				boost_state_changed = !cluster->boost;
				++cluster->boost;
			} else {
				if (!cluster->boost) {
					pr_err("Error turning off boost. Boost already turned off\n");
					ret = -EINVAL;
				} else {
					--cluster->boost;
					boost_state_changed = !cluster->boost;
				}
			}
			break;
		}
	}
	spin_unlock_irqrestore(&state_lock, flags);

	if (boost_state_changed)
		apply_need(cluster);

	trace_core_ctl_set_boost(cluster->boost, ret);

	return ret;
}
EXPORT_SYMBOL(core_ctl_set_boost);

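/*
 * Entry point from the scheduler, invoked with the current window start
 * timestamp (repeat calls for the same window are ignored). It samples
 * each CPU's utilization, refreshes the runqueue averages and wakes the
 * per-cluster thread for any cluster whose need changed.
 */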
void core_ctl_check(u64 window_start)
{
	int cpu;
	unsigned int busy;
	struct cpu_data *c;
	struct cluster_data *cluster;
	unsigned int index = 0;

	if (unlikely(!initialized))
		return;

	if (window_start == core_ctl_check_timestamp)
		return;

	core_ctl_check_timestamp = window_start;

	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;

		if (!cluster || !cluster->inited)
			continue;

		busy = sched_get_cpu_util(cpu);
		core_ctl_set_busy(c, busy);
	}

	update_running_avg();

	for_each_cluster(cluster, index) {
		if (eval_need(cluster))
			wake_up_core_ctl_thread(cluster);
	}
}

static void move_cpu_lru(struct cpu_data *cpu_data)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_del(&cpu_data->sib);
	list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
	spin_unlock_irqrestore(&state_lock, flags);
}

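/*
 * Walk the LRU and isolate CPUs until the active count drops to "need".
 * The first pass never touches busy CPUs; if the active count still
 * exceeds max_cpus after that, a second pass isolates CPUs regardless
 * of how busy they are.
 */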
static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_isolated = 0;

	/*
	 * Protect against the entry being removed (and re-added at the
	 * tail) by another thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus == need)
			break;
		/* Don't offline busy CPUs. */
		if (c->is_busy)
			continue;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * If the number of active CPUs is within the limits, then
	 * don't force isolation of any busy CPUs.
	 */
	if (cluster->active_cpus <= cluster->max_cpus)
		return;

	nr_isolated = 0;
	num_cpus = cluster->num_cpus;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus <= cluster->max_cpus)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);
}

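/*
 * Walk the LRU and bring back CPUs that core_ctl isolated until the
 * active count reaches "need". Unless "force" is set, CPUs marked
 * not_preferred are skipped; try_to_unisolate() first tries without
 * them and only falls back to forcing them back in if still short.
 */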
static void __try_to_unisolate(struct cluster_data *cluster,
			       unsigned int need, bool force)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_unisolated = 0;

	/*
	 * Protect against the entry being removed (and re-added at the
	 * tail) by another thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!c->isolated_by_us)
			continue;
		if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
			(!force && c->not_preferred))
			continue;
		if (cluster->active_cpus == need)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to unisolate CPU%u\n", c->cpu);
		if (!sched_unisolate_cpu(c->cpu)) {
			c->isolated_by_us = false;
			move_cpu_lru(c);
			nr_unisolated++;
		} else {
			pr_debug("Unable to unisolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus -= nr_unisolated;
	spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
{
	bool force_use_non_preferred = false;

	__try_to_unisolate(cluster, need, force_use_non_preferred);

	if (cluster->active_cpus == need)
		return;

	force_use_non_preferred = true;
	__try_to_unisolate(cluster, need, force_use_non_preferred);
}

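/* Apply the committed need for a cluster by isolating or unisolating CPUs. */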
static void __ref do_core_ctl(struct cluster_data *cluster)
{
	unsigned int need;

	need = apply_limits(cluster, cluster->need_cpus);

	if (adjustment_possible(cluster, need)) {
		pr_debug("Trying to adjust group %u from %u to %u\n",
				cluster->first_cpu, cluster->active_cpus, need);

		if (cluster->active_cpus > need)
			try_to_isolate(cluster, need);
		else if (cluster->active_cpus < need)
			try_to_unisolate(cluster, need);
	}
}

static int __ref try_core_ctl(void *data)
{
	struct cluster_data *cluster = data;
	unsigned long flags;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&cluster->pending_lock, flags);
		if (!cluster->pending) {
			spin_unlock_irqrestore(&cluster->pending_lock, flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&cluster->pending_lock, flags);
		}
		set_current_state(TASK_RUNNING);
		cluster->pending = false;
		spin_unlock_irqrestore(&cluster->pending_lock, flags);

		do_core_ctl(cluster);
	}

	return 0;
}

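/*
 * Hotplug notifier: an onlined or dead CPU is moved to the tail of its
 * cluster's LRU, and a CPU that dies while isolated by core_ctl is
 * unisolated so it is never both offline and isolated. The per-cluster
 * thread is woken afterwards if an adjustment has become possible.
 */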
static int __ref cpu_callback(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	uint32_t cpu = (uintptr_t)hcpu;
	struct cpu_data *state = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = state->cluster;
	unsigned int need;
	bool do_wakeup, unisolated = false;
	unsigned long flags;

	if (unlikely(!cluster || !cluster->inited))
		return NOTIFY_DONE;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		cluster->active_cpus = get_active_cpu_count(cluster);

		/*
		 * Moving to the end of the list should only happen in
		 * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an
		 * infinite list traversal when thermal (or other entities)
		 * reject trying to online CPUs.
		 */
		move_cpu_lru(state);
		break;

	case CPU_DEAD:
		/*
		 * We don't want to have a CPU both offline and isolated.
		 * So unisolate a CPU that went down if it was isolated by us.
		 */
		if (state->isolated_by_us) {
			sched_unisolate_cpu_unlocked(cpu);
			state->isolated_by_us = false;
			unisolated = true;
		}

		/* Move a CPU to the end of the LRU when it goes offline. */
		move_cpu_lru(state);

		state->busy = 0;
		cluster->active_cpus = get_active_cpu_count(cluster);
		break;
	default:
		return NOTIFY_DONE;
	}

	need = apply_limits(cluster, cluster->need_cpus);
	spin_lock_irqsave(&state_lock, flags);
	if (unisolated)
		cluster->nr_isolated_cpus--;
	do_wakeup = adjustment_possible(cluster, need);
	spin_unlock_irqrestore(&state_lock, flags);
	if (do_wakeup)
		wake_up_core_ctl_thread(cluster);

	return NOTIFY_OK;
}

static struct notifier_block __refdata cpu_notifier = {
	.notifier_call = cpu_callback,
};

/* ============================ init code ============================== */

static cpumask_var_t core_ctl_disable_cpumask;
static bool core_ctl_disable_cpumask_present;

static int __init core_ctl_disable_setup(char *str)
{
	if (!*str)
		return -EINVAL;

	alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);

	if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
		free_bootmem_cpumask_var(core_ctl_disable_cpumask);
		return -EINVAL;
	}

	core_ctl_disable_cpumask_present = true;
	pr_info("disable_cpumask=%*pbl\n",
			cpumask_pr_args(core_ctl_disable_cpumask));

	return 0;
}
early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);

static bool should_skip(const struct cpumask *mask)
{
	if (!core_ctl_disable_cpumask_present)
		return false;

	/*
	 * We operate on a per-cluster basis. Disable core_ctl for a
	 * cluster if all of its CPUs are specified in
	 * core_ctl_disable_cpumask.
	 */
	return cpumask_subset(mask, core_ctl_disable_cpumask);
}

static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
	unsigned int i;

	for (i = 0; i < num_clusters; ++i) {
		if (cluster_state[i].first_cpu == first_cpu)
			return &cluster_state[i];
	}

	return NULL;
}

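/*
 * Set up per-cluster state for the CPUs in "mask": default limits, the
 * LRU list and per-CPU state, the SCHED_FIFO core_ctl thread, and the
 * "core_ctl" sysfs directory under the first CPU's device.
 */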
static int cluster_init(const struct cpumask *mask)
{
	struct device *dev;
	unsigned int first_cpu = cpumask_first(mask);
	struct cluster_data *cluster;
	struct cpu_data *state;
	unsigned int cpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	if (should_skip(mask))
		return 0;

	if (find_cluster_by_first_cpu(first_cpu))
		return 0;

	dev = get_cpu_device(first_cpu);
	if (!dev)
		return -ENODEV;

	pr_info("Creating CPU group %d\n", first_cpu);

	if (num_clusters == MAX_CLUSTERS) {
		pr_err("Unsupported number of clusters. Only %u supported\n",
				MAX_CLUSTERS);
		return -EINVAL;
	}
	cluster = &cluster_state[num_clusters];
	++num_clusters;

	cpumask_copy(&cluster->cpu_mask, mask);
	cluster->num_cpus = cpumask_weight(mask);
	if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
		pr_err("HW configuration not supported\n");
		return -EINVAL;
	}
	cluster->first_cpu = first_cpu;
	cluster->min_cpus = 1;
	cluster->max_cpus = cluster->num_cpus;
	cluster->need_cpus = cluster->num_cpus;
	cluster->offline_delay_ms = 100;
	cluster->task_thres = UINT_MAX;
	cluster->nrrun = cluster->num_cpus;
	INIT_LIST_HEAD(&cluster->lru);
	spin_lock_init(&cluster->pending_lock);

	for_each_cpu(cpu, mask) {
		pr_info("Init CPU%u state\n", cpu);

		state = &per_cpu(cpu_state, cpu);
		state->cluster = cluster;
		state->cpu = cpu;
		list_add_tail(&state->sib, &cluster->lru);
	}
	cluster->active_cpus = get_active_cpu_count(cluster);

	cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
					"core_ctl/%d", first_cpu);
	if (IS_ERR(cluster->core_ctl_thread))
		return PTR_ERR(cluster->core_ctl_thread);

	sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
				   &param);

	cluster->inited = true;

	kobject_init(&cluster->kobj, &ktype_core_ctl);
	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}

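/*
 * Register the hotplug notifier and create one core_ctl group per
 * coregroup, unless core control was disabled for all CPUs via the
 * core_ctl_disable_cpumask boot parameter.
 */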
static int __init core_ctl_init(void)
{
	unsigned int cpu;
	struct cpumask cpus = *cpu_possible_mask;

	if (should_skip(cpu_possible_mask))
		return 0;

	register_cpu_notifier(&cpu_notifier);

	for_each_cpu(cpu, &cpus) {
		int ret;
		const struct cpumask *cluster_cpus = cpu_coregroup_mask(cpu);

		ret = cluster_init(cluster_cpus);
		if (ret)
			pr_warn("unable to create core ctl group: %d\n", ret);
		cpumask_andnot(&cpus, &cpus, cluster_cpus);
	}
	initialized = true;
	return 0;
}

late_initcall(core_ctl_init);