/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

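/*
 * core_ctl: per-cluster CPU core control. It tracks per-CPU load and the
 * scheduler's average runqueue depth, decides how many CPUs each cluster
 * needs, and isolates or unisolates CPUs to match. Tunables are exposed
 * through a per-cluster "core_ctl" sysfs kobject.
 */
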
#define pr_fmt(fmt) "core_ctl: " fmt

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>

#include <trace/events/sched.h>

#define MAX_CPUS_PER_CLUSTER 4
#define MAX_CLUSTERS 2

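/*
 * Per-cluster state: user-visible tunables (min/max CPUs, busy thresholds,
 * offline delay) plus bookkeeping used by the cluster's core_ctl thread.
 */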
struct cluster_data {
	bool inited;
	unsigned int min_cpus;
	unsigned int max_cpus;
	unsigned int offline_delay_ms;
	unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int active_cpus;
	unsigned int num_cpus;
	unsigned int nr_isolated_cpus;
	cpumask_t cpu_mask;
	unsigned int need_cpus;
	unsigned int task_thres;
	s64 need_ts;
	struct list_head lru;
	bool pending;
	spinlock_t pending_lock;
	bool is_big_cluster;
	int nrrun;
	bool nrrun_changed;
	struct task_struct *core_ctl_thread;
	unsigned int first_cpu;
	unsigned int boost;
	struct kobject kobj;
};

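/* Per-CPU state tracked by core control. */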
struct cpu_data {
	bool is_busy;
	unsigned int busy;
	unsigned int cpu;
	bool not_preferred;
	struct cluster_data *cluster;
	struct list_head sib;
	bool isolated_by_us;
};

static DEFINE_PER_CPU(struct cpu_data, cpu_state);
static struct cluster_data cluster_state[MAX_CLUSTERS];
static unsigned int num_clusters;

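/* Iterate over all registered clusters; "idx" must start at 0. */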
#define for_each_cluster(cluster, idx) \
	for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
		(idx)++, (cluster) = &cluster_state[idx])

static DEFINE_SPINLOCK(state_lock);
static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);

/* ========================= sysfs interface =========================== */

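/*
 * Each cluster registers a "core_ctl" kobject under the cpu device of its
 * first CPU, so the tunables below typically show up as, e.g.,
 * /sys/devices/system/cpu/cpu0/core_ctl/min_cpus (the exact path depends
 * on the platform's CPU numbering).
 */
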
static ssize_t store_min_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->min_cpus = min(val, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
}

static ssize_t store_max_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	val = min(val, state->num_cpus);
	state->max_cpus = val;
	state->min_cpus = min(state->min_cpus, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
}

static ssize_t store_offline_delay_ms(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->offline_delay_ms = val;
	apply_need(state);

	return count;
}

static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
}

static ssize_t store_task_thres(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	if (val < state->num_cpus)
		return -EINVAL;

	state->task_thres = val;
	apply_need(state);

	return count;
}

static ssize_t show_offline_delay_ms(const struct cluster_data *state,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
}

static ssize_t store_busy_up_thres(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_up_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_busy_down_thres(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_down_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_is_big_cluster(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->is_big_cluster = val ? 1 : 0;
	return count;
}

static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}

static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
}

static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
}

static ssize_t show_global_state(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	struct cluster_data *cluster;
	ssize_t count = 0;
	unsigned int cpu;

	spin_lock_irq(&state_lock);
	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;
		if (!cluster || !cluster->inited)
			continue;

		count += snprintf(buf + count, PAGE_SIZE - count,
					"CPU%u\n", cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tCPU: %u\n", c->cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tOnline: %u\n",
					cpu_online(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tIsolated: %u\n",
					cpu_isolated(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tFirst CPU: %u\n",
					cluster->first_cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tBusy%%: %u\n", c->busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tIs busy: %u\n", c->is_busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNot preferred: %u\n",
					c->not_preferred);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNr running: %u\n", cluster->nrrun);
		count += snprintf(buf + count, PAGE_SIZE - count,
			"\tActive CPUs: %u\n", get_active_cpu_count(cluster));
		count += snprintf(buf + count, PAGE_SIZE - count,
				"\tNeed CPUs: %u\n", cluster->need_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
				"\tNr isolated CPUs: %u\n",
				cluster->nr_isolated_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
				"\tBoost: %u\n", (unsigned int) cluster->boost);
	}
	spin_unlock_irq(&state_lock);

	return count;
}

static ssize_t store_not_preferred(struct cluster_data *state,
				   const char *buf, size_t count)
{
	struct cpu_data *c;
	unsigned int i;
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	unsigned long flags;
	int ret;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != state->num_cpus)
		return -EINVAL;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		c->not_preferred = val[i];
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		count += scnprintf(buf + count, PAGE_SIZE - count,
				"CPU#%d: %u\n", c->cpu, c->not_preferred);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

struct core_ctl_attr {
	struct attribute attr;
	ssize_t (*show)(const struct cluster_data *, char *);
	ssize_t (*store)(struct cluster_data *, const char *, size_t count);
};

#define core_ctl_attr_ro(_name)		\
static struct core_ctl_attr _name =	\
__ATTR(_name, 0444, show_##_name, NULL)

#define core_ctl_attr_rw(_name)			\
static struct core_ctl_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

core_ctl_attr_rw(min_cpus);
core_ctl_attr_rw(max_cpus);
core_ctl_attr_rw(offline_delay_ms);
core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);

static struct attribute *default_attrs[] = {
	&min_cpus.attr,
	&max_cpus.attr,
	&offline_delay_ms.attr,
	&busy_up_thres.attr,
	&busy_down_thres.attr,
	&task_thres.attr,
	&is_big_cluster.attr,
	&need_cpus.attr,
	&active_cpus.attr,
	&global_state.attr,
	&not_preferred.attr,
	NULL
};

#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->show)
		ret = cattr->show(data, buf);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->store)
		ret = cattr->store(data, buf, count);

	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_core_ctl = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
};

/* ==================== runqueue based core count =================== */

#define RQ_AVG_TOLERANCE 2
#define RQ_AVG_DEFAULT_MS 20
#define NR_RUNNING_TOLERANCE 5
static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS;

static s64 rq_avg_timestamp_ms;

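/*
 * Sample the scheduler's average nr_running statistics (rate-limited to
 * roughly once per rq_avg_period_ms) and refresh each cluster's nrrun.
 */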
static void update_running_avg(bool trigger_update)
{
	int avg, iowait_avg, big_avg, old_nrrun;
	s64 now;
	unsigned long flags;
	struct cluster_data *cluster;
	unsigned int index = 0;

	spin_lock_irqsave(&state_lock, flags);

	now = ktime_to_ms(ktime_get());
	if (now - rq_avg_timestamp_ms < rq_avg_period_ms - RQ_AVG_TOLERANCE) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}
	rq_avg_timestamp_ms = now;
	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);

	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * Round up to the next integer if the average number of running
	 * tasks is within NR_RUNNING_TOLERANCE/100 of the next integer.
	 * If normal rounding up were used, a transient task could trigger
	 * an online event, and by the time the core is onlined the task
	 * has finished.
	 * Rounding to the closest integer suffers the same problem because
	 * the scheduler may only provide running stats once per jiffy, and
	 * a transient task could skew the number for one jiffy. If core
	 * control samples every 2 jiffies, it will observe an additional
	 * 0.5 in the running average, which rounds up to 1 task.
	 */
	avg = (avg + NR_RUNNING_TOLERANCE) / 100;
	big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;

	for_each_cluster(cluster, index) {
		if (!cluster->inited)
			continue;
		old_nrrun = cluster->nrrun;
		/*
		 * The big cluster only needs to take care of big tasks, but
		 * if there are not enough big cores, big tasks must run on
		 * the little cluster as well. Thus the little cluster's
		 * runqueue stat has to use the overall runqueue average, or
		 * derive how many big tasks would have to run on little.
		 * The latter is hard to get right because core control
		 * reacts much more slowly than the scheduler and cannot
		 * predict its behavior.
		 */
		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
		if (cluster->nrrun != old_nrrun) {
			if (trigger_update)
				apply_need(cluster);
			else
				cluster->nrrun_changed = true;
		}
	}
}

/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
				    unsigned int new_need)
{
	/* unisolate all cores if there are enough tasks */
	if (cluster->nrrun >= cluster->task_thres)
		return cluster->num_cpus;

	/* only unisolate more cores if there are tasks to run */
	if (cluster->nrrun > new_need)
		return new_need + 1;

	return new_need;
}

/* ======================= load based core count ====================== */

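/* Clamp the requested CPU count to the cluster's [min_cpus, max_cpus]. */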
static unsigned int apply_limits(const struct cluster_data *cluster,
				 unsigned int need_cpus)
{
	return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
}

static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
{
	return cluster->num_cpus -
				sched_isolate_count(&cluster->cpu_mask, true);
}

static bool is_active(const struct cpu_data *state)
{
	return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
}

static bool adjustment_possible(const struct cluster_data *cluster,
				unsigned int need)
{
	return (need < cluster->active_cpus || (need > cluster->active_cpus &&
						cluster->nr_isolated_cpus));
}

static bool eval_need(struct cluster_data *cluster)
{
	unsigned long flags;
	struct cpu_data *c;
	unsigned int need_cpus = 0, last_need, thres_idx;
	int ret = 0;
	bool need_flag = false;
	unsigned int new_need;
	s64 now, elapsed;

	if (unlikely(!cluster->inited))
		return 0;

	spin_lock_irqsave(&state_lock, flags);

	if (cluster->boost) {
		need_cpus = cluster->max_cpus;
	} else {
		cluster->active_cpus = get_active_cpu_count(cluster);
		thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
		list_for_each_entry(c, &cluster->lru, sib) {
			if (c->busy >= cluster->busy_up_thres[thres_idx])
				c->is_busy = true;
			else if (c->busy < cluster->busy_down_thres[thres_idx])
				c->is_busy = false;
			need_cpus += c->is_busy;
		}
		need_cpus = apply_task_need(cluster, need_cpus);
	}
	new_need = apply_limits(cluster, need_cpus);
	need_flag = adjustment_possible(cluster, new_need);

	last_need = cluster->need_cpus;
	now = ktime_to_ms(ktime_get());

	if (new_need > cluster->active_cpus) {
		ret = 1;
	} else {
		if (new_need == last_need) {
			cluster->need_ts = now;
			spin_unlock_irqrestore(&state_lock, flags);
			return 0;
		}

		elapsed = now - cluster->need_ts;
		ret = elapsed >= cluster->offline_delay_ms;
	}

	if (ret) {
		cluster->need_ts = now;
		cluster->need_cpus = new_need;
	}
	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
				 ret && need_flag);
	spin_unlock_irqrestore(&state_lock, flags);

	return ret && need_flag;
}

static void apply_need(struct cluster_data *cluster)
{
	if (eval_need(cluster))
		wake_up_core_ctl_thread(cluster);
}

static int core_ctl_set_busy(unsigned int cpu, unsigned int busy)
{
	struct cpu_data *c = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = c->cluster;
	unsigned int old_is_busy = c->is_busy;

	if (!cluster || !cluster->inited)
		return 0;

	update_running_avg(false);
	if (c->busy == busy && !cluster->nrrun_changed)
		return 0;
	c->busy = busy;
	cluster->nrrun_changed = false;

	apply_need(cluster);
	trace_core_ctl_set_busy(cpu, busy, old_is_busy, c->is_busy);
	return 0;
}

/* ========================= core count enforcement ==================== */

static void wake_up_core_ctl_thread(struct cluster_data *cluster)
{
	unsigned long flags;

	spin_lock_irqsave(&cluster->pending_lock, flags);
	cluster->pending = true;
	spin_unlock_irqrestore(&cluster->pending_lock, flags);

	wake_up_process_no_notif(cluster->core_ctl_thread);
}

static u64 core_ctl_check_timestamp;
static u64 core_ctl_check_interval;

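/* Rate-limit need re-evaluation to once per core_ctl_check_interval. */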
static bool do_check(u64 wallclock)
{
	bool do_check = false;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	if ((wallclock - core_ctl_check_timestamp) >= core_ctl_check_interval) {
		core_ctl_check_timestamp = wallclock;
		do_check = true;
	}
	spin_unlock_irqrestore(&state_lock, flags);
	return do_check;
}

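/*
 * Boost requests are reference-counted on the big cluster; while boosted,
 * eval_need() pins the cluster's need at max_cpus.
 */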
int core_ctl_set_boost(bool boost)
{
	unsigned int index = 0;
	struct cluster_data *cluster;
	unsigned long flags;
	int ret = 0;
	bool boost_state_changed = false;

	if (unlikely(!initialized))
		return 0;

	spin_lock_irqsave(&state_lock, flags);
	for_each_cluster(cluster, index) {
		if (cluster->is_big_cluster) {
			if (boost) {
				boost_state_changed = !cluster->boost;
				++cluster->boost;
			} else {
				if (!cluster->boost) {
					pr_err("Error turning off boost. Boost already turned off\n");
					ret = -EINVAL;
				} else {
					--cluster->boost;
					boost_state_changed = !cluster->boost;
				}
			}
			break;
		}
	}
	spin_unlock_irqrestore(&state_lock, flags);

	if (boost_state_changed)
		apply_need(cluster);

	trace_core_ctl_set_boost(cluster->boost, ret);

	return ret;
}
EXPORT_SYMBOL(core_ctl_set_boost);

void core_ctl_check(u64 wallclock)
{
	if (unlikely(!initialized))
		return;

	if (do_check(wallclock)) {
		unsigned int index = 0;
		struct cluster_data *cluster;

		update_running_avg(true);

		for_each_cluster(cluster, index) {
			if (eval_need(cluster))
				wake_up_core_ctl_thread(cluster);
		}
	}
}

static void move_cpu_lru(struct cpu_data *cpu_data)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_del(&cpu_data->sib);
	list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
	spin_unlock_irqrestore(&state_lock, flags);
}

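/*
 * Walk the cluster's LRU list and isolate non-busy CPUs until the active
 * count drops to "need". If more than max_cpus are still active, a second
 * pass isolates busy CPUs as well.
 */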
static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_isolated = 0;

	/*
	 * Protect against entry being removed (and added at tail) by other
	 * thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus == need)
			break;
		/* Don't isolate busy CPUs. */
		if (c->is_busy)
			continue;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * If the number of active CPUs is within the limits, then
	 * don't force isolation of any busy CPUs.
	 */
	if (cluster->active_cpus <= cluster->max_cpus)
		return;

	nr_isolated = 0;
	num_cpus = cluster->num_cpus;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus <= cluster->max_cpus)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			nr_isolated++;
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus += nr_isolated;
	spin_unlock_irqrestore(&state_lock, flags);
}

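/*
 * Bring back CPUs that core_ctl isolated until the active count reaches
 * "need". Non-preferred CPUs are skipped unless "force" is set.
 */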
static void __try_to_unisolate(struct cluster_data *cluster,
				unsigned int need, bool force)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;
	unsigned int nr_unisolated = 0;

	/*
	 * Protect against entry being removed (and added at tail) by other
	 * thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!c->isolated_by_us)
			continue;
		if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
			(!force && c->not_preferred))
			continue;
		if (cluster->active_cpus == need)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to unisolate CPU%u\n", c->cpu);
		if (!sched_unisolate_cpu(c->cpu)) {
			c->isolated_by_us = false;
			move_cpu_lru(c);
			nr_unisolated++;
		} else {
			pr_debug("Unable to unisolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	cluster->nr_isolated_cpus -= nr_unisolated;
	spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
{
	bool force_use_non_preferred = false;

	__try_to_unisolate(cluster, need, force_use_non_preferred);

	if (cluster->active_cpus == need)
		return;

	force_use_non_preferred = true;
	__try_to_unisolate(cluster, need, force_use_non_preferred);
}

static void __ref do_core_ctl(struct cluster_data *cluster)
{
	unsigned int need;

	need = apply_limits(cluster, cluster->need_cpus);

	if (adjustment_possible(cluster, need)) {
		pr_debug("Trying to adjust group %u from %u to %u\n",
				cluster->first_cpu, cluster->active_cpus, need);

		if (cluster->active_cpus > need)
			try_to_isolate(cluster, need);
		else if (cluster->active_cpus < need)
			try_to_unisolate(cluster, need);
	}
}

static int __ref try_core_ctl(void *data)
{
	struct cluster_data *cluster = data;
	unsigned long flags;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&cluster->pending_lock, flags);
		if (!cluster->pending) {
			spin_unlock_irqrestore(&cluster->pending_lock, flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&cluster->pending_lock, flags);
		}
		set_current_state(TASK_RUNNING);
		cluster->pending = false;
		spin_unlock_irqrestore(&cluster->pending_lock, flags);

		do_core_ctl(cluster);
	}

	return 0;
}

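/*
 * Hotplug notifier: keep the LRU ordering and the isolation accounting in
 * sync when CPUs come online or go offline outside core_ctl's control.
 */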
static int __ref cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	uint32_t cpu = (uintptr_t)hcpu;
	struct cpu_data *state = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = state->cluster;
	unsigned int need;
	bool do_wakeup, unisolated = false;
	unsigned long flags;

	if (unlikely(!cluster || !cluster->inited))
		return NOTIFY_DONE;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		cluster->active_cpus = get_active_cpu_count(cluster);

		/*
		 * Moving to the end of the list should only happen in
		 * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an
		 * infinite list traversal when thermal (or other entities)
		 * reject trying to online CPUs.
		 */
		move_cpu_lru(state);
		break;

	case CPU_DEAD:
		/*
		 * We don't want to have a CPU both offline and isolated.
		 * So unisolate a CPU that went down if it was isolated by us.
		 */
		if (state->isolated_by_us) {
			sched_unisolate_cpu_unlocked(cpu);
			state->isolated_by_us = false;
			unisolated = true;
		}

		/* Move a CPU to the end of the LRU when it goes offline. */
		move_cpu_lru(state);

		state->busy = 0;
		cluster->active_cpus = get_active_cpu_count(cluster);
		break;
	default:
		return NOTIFY_DONE;
	}

	need = apply_limits(cluster, cluster->need_cpus);
	spin_lock_irqsave(&state_lock, flags);
	if (unisolated)
		cluster->nr_isolated_cpus--;
	do_wakeup = adjustment_possible(cluster, need);
	spin_unlock_irqrestore(&state_lock, flags);
	if (do_wakeup)
		wake_up_core_ctl_thread(cluster);

	return NOTIFY_OK;
}

static struct notifier_block __refdata cpu_notifier = {
	.notifier_call = cpu_callback,
};

/* ============================ init code ============================== */

static cpumask_var_t core_ctl_disable_cpumask;
static bool core_ctl_disable_cpumask_present;

static int __init core_ctl_disable_setup(char *str)
{
	if (!*str)
		return -EINVAL;

	alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);

	if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
		free_bootmem_cpumask_var(core_ctl_disable_cpumask);
		return -EINVAL;
	}

	core_ctl_disable_cpumask_present = true;
	pr_info("disable_cpumask=%*pbl\n",
			cpumask_pr_args(core_ctl_disable_cpumask));

	return 0;
}
early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);

static bool should_skip(const struct cpumask *mask)
{
	if (!core_ctl_disable_cpumask_present)
		return false;

	/*
	 * We operate on a per-cluster basis. Disable core_ctl for a
	 * cluster if all of its CPUs are specified in
	 * core_ctl_disable_cpumask.
	 */
	return cpumask_subset(mask, core_ctl_disable_cpumask);
}

static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
	unsigned int i;

	for (i = 0; i < num_clusters; ++i) {
		if (cluster_state[i].first_cpu == first_cpu)
			return &cluster_state[i];
	}

	return NULL;
}

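/*
 * Set up per-cluster state for the CPUs in "mask" (normally one cpufreq
 * policy), start the cluster's core_ctl thread and register its sysfs node.
 */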
static int cluster_init(const struct cpumask *mask)
{
	struct device *dev;
	unsigned int first_cpu = cpumask_first(mask);
	struct cluster_data *cluster;
	struct cpu_data *state;
	unsigned int cpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	if (should_skip(mask))
		return 0;

	if (find_cluster_by_first_cpu(first_cpu))
		return 0;

	dev = get_cpu_device(first_cpu);
	if (!dev)
		return -ENODEV;

	pr_info("Creating CPU group %d\n", first_cpu);

	if (num_clusters == MAX_CLUSTERS) {
		pr_err("Unsupported number of clusters. Only %u supported\n",
								MAX_CLUSTERS);
		return -EINVAL;
	}
	cluster = &cluster_state[num_clusters];
	++num_clusters;

	cpumask_copy(&cluster->cpu_mask, mask);
	cluster->num_cpus = cpumask_weight(mask);
	if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
		pr_err("HW configuration not supported\n");
		return -EINVAL;
	}
	cluster->first_cpu = first_cpu;
	cluster->min_cpus = 1;
	cluster->max_cpus = cluster->num_cpus;
	cluster->need_cpus = cluster->num_cpus;
	cluster->offline_delay_ms = 100;
	cluster->task_thres = UINT_MAX;
	cluster->nrrun = cluster->num_cpus;
	INIT_LIST_HEAD(&cluster->lru);
	spin_lock_init(&cluster->pending_lock);

	for_each_cpu(cpu, mask) {
		pr_info("Init CPU%u state\n", cpu);

		state = &per_cpu(cpu_state, cpu);
		state->cluster = cluster;
		state->cpu = cpu;
		list_add_tail(&state->sib, &cluster->lru);
	}
	cluster->active_cpus = get_active_cpu_count(cluster);

	cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
					"core_ctl/%d", first_cpu);
	if (IS_ERR(cluster->core_ctl_thread))
		return PTR_ERR(cluster->core_ctl_thread);

	sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
				   &param);

	cluster->inited = true;

	kobject_init(&cluster->kobj, &ktype_core_ctl);
	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}

static int cpufreq_policy_cb(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_policy *policy = data;
	int ret;

	switch (val) {
	case CPUFREQ_CREATE_POLICY:
		ret = cluster_init(policy->related_cpus);
		if (ret)
			pr_warn("unable to create core ctl group: %d\n", ret);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cpufreq_pol_nb = {
	.notifier_call = cpufreq_policy_cb,
};

static int cpufreq_gov_cb(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_govinfo *info = data;

	switch (val) {
	case CPUFREQ_LOAD_CHANGE:
		core_ctl_set_busy(info->cpu, info->load);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cpufreq_gov_nb = {
	.notifier_call = cpufreq_gov_cb,
};

static int __init core_ctl_init(void)
{
	unsigned int cpu;

	if (should_skip(cpu_possible_mask))
		return 0;

	core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
					* NSEC_PER_MSEC;

	register_cpu_notifier(&cpu_notifier);
	cpufreq_register_notifier(&cpufreq_pol_nb, CPUFREQ_POLICY_NOTIFIER);
	cpufreq_register_notifier(&cpufreq_gov_nb, CPUFREQ_GOVINFO_NOTIFIER);

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		int ret;

		policy = cpufreq_cpu_get(cpu);
		if (policy) {
			ret = cluster_init(policy->related_cpus);
			if (ret)
				pr_warn("unable to create core ctl group: %d\n",
					ret);
			cpufreq_cpu_put(policy);
		}
	}
	cpu_maps_update_done();
	initialized = true;
	return 0;
}

late_initcall(core_ctl_init);