/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>

#include <trace/events/sched.h>

#define MAX_CPUS_PER_CLUSTER 4
#define MAX_CLUSTERS 2

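/*
 * Per-cluster state: user tunables (min/max CPUs, busy thresholds,
 * offline delay), the LRU list of this cluster's CPUs, and the kthread
 * that enforces the computed CPU need by isolating/unisolating cores.
 */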
struct cluster_data {
	bool inited;
	unsigned int min_cpus;
	unsigned int max_cpus;
	unsigned int offline_delay_ms;
	unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int active_cpus;
	unsigned int num_cpus;
	cpumask_t cpu_mask;
	unsigned int need_cpus;
	unsigned int task_thres;
	s64 last_isolate_ts;
	struct list_head lru;
	bool pending;
	spinlock_t pending_lock;
	bool is_big_cluster;
	int nrrun;
	bool nrrun_changed;
	struct task_struct *core_ctl_thread;
	unsigned int first_cpu;
	unsigned int boost;
	struct kobject kobj;
};

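/* Per-CPU state tracked by core control, linked into its cluster's LRU. */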
struct cpu_data {
	bool online;
	bool is_busy;
	unsigned int busy;
	unsigned int cpu;
	bool not_preferred;
	struct cluster_data *cluster;
	struct list_head sib;
	bool isolated_by_us;
};

static DEFINE_PER_CPU(struct cpu_data, cpu_state);
static struct cluster_data cluster_state[MAX_CLUSTERS];
static unsigned int num_clusters;

#define for_each_cluster(cluster, idx) \
	for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
		(idx)++, (cluster) = &cluster_state[idx])

static DEFINE_SPINLOCK(state_lock);
static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);

/* ========================= sysfs interface =========================== */

static ssize_t store_min_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->min_cpus = min(val, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
}

static ssize_t store_max_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	val = min(val, state->num_cpus);
	state->max_cpus = val;
	state->min_cpus = min(state->min_cpus, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
}

static ssize_t store_offline_delay_ms(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->offline_delay_ms = val;
	apply_need(state);

	return count;
}

static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
}

static ssize_t store_task_thres(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	if (val < state->num_cpus)
		return -EINVAL;

	state->task_thres = val;
	apply_need(state);

	return count;
}

static ssize_t show_offline_delay_ms(const struct cluster_data *state,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
}

static ssize_t store_busy_up_thres(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_up_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_busy_down_thres(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_down_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_is_big_cluster(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->is_big_cluster = val ? 1 : 0;
	return count;
}

static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}

static ssize_t show_cpus(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry(c, &state->lru, sib) {
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "CPU%u (%s)\n", c->cpu,
				  c->online ? "Online" : "Offline");
	}
	spin_unlock_irqrestore(&state_lock, flags);
	return count;
}

static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
}

static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
}

static ssize_t show_global_state(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	struct cluster_data *cluster;
	ssize_t count = 0;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		if (!c->cluster)
			continue;

		cluster = c->cluster;
		if (!cluster || !cluster->inited)
			continue;

		count += snprintf(buf + count, PAGE_SIZE - count,
				  "CPU%u\n", cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tCPU: %u\n", c->cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tOnline: %u\n", c->online);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tActive: %u\n",
				  !cpu_isolated(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tFirst CPU: %u\n",
				  cluster->first_cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tBusy%%: %u\n", c->busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tIs busy: %u\n", c->is_busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNr running: %u\n", cluster->nrrun);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tActive CPUs: %u\n",
				  get_active_cpu_count(cluster));
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tNeed CPUs: %u\n", cluster->need_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tBoost: %u\n", (unsigned int) cluster->boost);
	}

	return count;
}

static ssize_t store_not_preferred(struct cluster_data *state,
				   const char *buf, size_t count)
{
	struct cpu_data *c;
	unsigned int i;
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	unsigned long flags;
	int ret;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	i = 0;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry(c, &state->lru, sib)
		c->not_preferred = val[i++];
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry(c, &state->lru, sib)
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "\tCPU:%d %u\n", c->cpu, c->not_preferred);
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

struct core_ctl_attr {
	struct attribute attr;
	ssize_t (*show)(const struct cluster_data *, char *);
	ssize_t (*store)(struct cluster_data *, const char *, size_t count);
};

#define core_ctl_attr_ro(_name) \
static struct core_ctl_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define core_ctl_attr_rw(_name) \
static struct core_ctl_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

core_ctl_attr_rw(min_cpus);
core_ctl_attr_rw(max_cpus);
core_ctl_attr_rw(offline_delay_ms);
core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
core_ctl_attr_ro(cpus);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);

static struct attribute *default_attrs[] = {
	&min_cpus.attr,
	&max_cpus.attr,
	&offline_delay_ms.attr,
	&busy_up_thres.attr,
	&busy_down_thres.attr,
	&task_thres.attr,
	&is_big_cluster.attr,
	&cpus.attr,
	&need_cpus.attr,
	&active_cpus.attr,
	&global_state.attr,
	&not_preferred.attr,
	NULL
};

#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->show)
		ret = cattr->show(data, buf);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->store)
		ret = cattr->store(data, buf, count);

	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_core_ctl = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
};

/* ==================== runqueue based core count =================== */

#define RQ_AVG_TOLERANCE 2
#define RQ_AVG_DEFAULT_MS 20
#define NR_RUNNING_TOLERANCE 5
static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS;

static s64 rq_avg_timestamp_ms;

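/*
 * Refresh the scheduler's run-queue averages (at most once per
 * rq_avg_period_ms) and propagate the rounded task counts to each
 * cluster's nrrun, re-evaluating need if the count changed.
 */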
static void update_running_avg(bool trigger_update)
{
	int avg, iowait_avg, big_avg, old_nrrun;
	s64 now;
	unsigned long flags;
	struct cluster_data *cluster;
	unsigned int index = 0;

	spin_lock_irqsave(&state_lock, flags);

	now = ktime_to_ms(ktime_get());
	if (now - rq_avg_timestamp_ms < rq_avg_period_ms - RQ_AVG_TOLERANCE) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}
	rq_avg_timestamp_ms = now;
	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);

	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * Round up to the next integer only if the average number of
	 * running tasks is within NR_RUNNING_TOLERANCE/100 of the next
	 * integer. Plain rounding up would let a transient task trigger
	 * an online event; by the time the core is onlined, the task has
	 * already finished. Rounding to the closest integer suffers from
	 * the same problem: the scheduler may only provide running stats
	 * once per jiffy, so a transient task can skew the number for one
	 * jiffy. If core control samples every 2 jiffies, it would observe
	 * 0.5 additional running average, which rounds up to 1 task.
	 */
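	/*
	 * For example (assuming the averages are reported on a x100 scale,
	 * as the division by 100 implies): an average of 195 (1.95 tasks)
	 * becomes 2, while 190 (1.90 tasks) becomes 1.
	 */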
	avg = (avg + NR_RUNNING_TOLERANCE) / 100;
	big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;

	for_each_cluster(cluster, index) {
		if (!cluster->inited)
			continue;
		old_nrrun = cluster->nrrun;
		/*
		 * The big cluster only needs to take care of big tasks, but
		 * if there are not enough big cores, big tasks must run on
		 * the little cluster as well. Thus the little cluster's
		 * runqueue stat has to use the overall runqueue average, or
		 * derive how many big tasks would have to run on little.
		 * The latter is hard to do, since core control reacts much
		 * more slowly than the scheduler and cannot predict its
		 * behavior.
		 */
		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
		if (cluster->nrrun != old_nrrun) {
			if (trigger_update)
				apply_need(cluster);
			else
				cluster->nrrun_changed = true;
		}
	}
}

/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
				    unsigned int new_need)
{
	/* unisolate all cores if there are enough tasks */
	if (cluster->nrrun >= cluster->task_thres)
		return cluster->num_cpus;

	/* only unisolate more cores if there are tasks to run */
	if (cluster->nrrun > new_need)
		return new_need + 1;

	return new_need;
}

/* ======================= load based core count ====================== */

static unsigned int apply_limits(const struct cluster_data *cluster,
				 unsigned int need_cpus)
{
	return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
}

static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
{
	return cluster->num_cpus -
			sched_isolate_count(&cluster->cpu_mask, true);
}

static bool is_active(const struct cpu_data *state)
{
	return state->online && !cpu_isolated(state->cpu);
}

static bool adjustment_possible(const struct cluster_data *cluster,
				unsigned int need)
{
	return (need < cluster->active_cpus || (need > cluster->active_cpus &&
			sched_isolate_count(&cluster->cpu_mask, false)));
}

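/*
 * Re-evaluate how many CPUs this cluster needs: max_cpus when boosted,
 * otherwise the number of busy CPUs (per the up/down thresholds) adjusted
 * for runnable tasks and clamped to the min/max limits. Returns true when
 * an adjustment is both needed and currently possible, honoring
 * offline_delay_ms before shrinking.
 */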
static bool eval_need(struct cluster_data *cluster)
{
	unsigned long flags;
	struct cpu_data *c;
	unsigned int need_cpus = 0, last_need, thres_idx;
	int ret = 0;
	bool need_flag = false;
	unsigned int active_cpus;
	unsigned int new_need;

	if (unlikely(!cluster->inited))
		return 0;

	spin_lock_irqsave(&state_lock, flags);

	if (cluster->boost) {
		need_cpus = cluster->max_cpus;
	} else {
		active_cpus = get_active_cpu_count(cluster);
		thres_idx = active_cpus ? active_cpus - 1 : 0;
		list_for_each_entry(c, &cluster->lru, sib) {
			if (c->busy >= cluster->busy_up_thres[thres_idx])
				c->is_busy = true;
			else if (c->busy < cluster->busy_down_thres[thres_idx])
				c->is_busy = false;
			need_cpus += c->is_busy;
		}
		need_cpus = apply_task_need(cluster, need_cpus);
	}
	new_need = apply_limits(cluster, need_cpus);
	need_flag = adjustment_possible(cluster, new_need);

	last_need = cluster->need_cpus;
	cluster->need_cpus = new_need;

	if (!need_flag) {
		spin_unlock_irqrestore(&state_lock, flags);
		return 0;
	}

	if (need_cpus > cluster->active_cpus) {
		ret = 1;
	} else if (need_cpus < cluster->active_cpus) {
		s64 now = ktime_to_ms(ktime_get());
		s64 elapsed = now - cluster->last_isolate_ts;

		ret = elapsed >= cluster->offline_delay_ms;
	}

	trace_core_ctl_eval_need(cluster->first_cpu, last_need, need_cpus,
				 ret && need_flag);
	spin_unlock_irqrestore(&state_lock, flags);

	return ret && need_flag;
}

static void apply_need(struct cluster_data *cluster)
{
	if (eval_need(cluster))
		wake_up_core_ctl_thread(cluster);
}

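/*
 * Record the latest per-CPU load (reported via the cpufreq governor
 * notifier) and re-evaluate the cluster's CPU need if the load or the
 * run-queue statistics changed.
 */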
static int core_ctl_set_busy(unsigned int cpu, unsigned int busy)
{
	struct cpu_data *c = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = c->cluster;
	unsigned int old_is_busy = c->is_busy;

	if (!cluster || !cluster->inited)
		return 0;

	update_running_avg(false);
	if (c->busy == busy && !cluster->nrrun_changed)
		return 0;
	c->busy = busy;
	cluster->nrrun_changed = false;

	apply_need(cluster);
	trace_core_ctl_set_busy(cpu, busy, old_is_busy, c->is_busy);
	return 0;
}

/* ========================= core count enforcement ==================== */

static void wake_up_core_ctl_thread(struct cluster_data *cluster)
{
	unsigned long flags;

	spin_lock_irqsave(&cluster->pending_lock, flags);
	cluster->pending = true;
	spin_unlock_irqrestore(&cluster->pending_lock, flags);

	wake_up_process_no_notif(cluster->core_ctl_thread);
}

static u64 core_ctl_check_timestamp;
static u64 core_ctl_check_interval;

static bool do_check(u64 wallclock)
{
	bool do_check = false;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	if ((wallclock - core_ctl_check_timestamp) >= core_ctl_check_interval) {
		core_ctl_check_timestamp = wallclock;
		do_check = true;
	}
	spin_unlock_irqrestore(&state_lock, flags);
	return do_check;
}

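/*
 * Boost (or unboost) the big cluster: while the boost count is non-zero,
 * eval_need() asks for max_cpus. Calls are reference counted; unboosting
 * an already unboosted cluster returns -EINVAL.
 */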
int core_ctl_set_boost(bool boost)
{
	unsigned int index = 0;
	struct cluster_data *cluster;
	unsigned long flags;
	int ret = 0;
	bool boost_state_changed = false;

	spin_lock_irqsave(&state_lock, flags);
	for_each_cluster(cluster, index) {
		if (cluster->is_big_cluster) {
			if (boost) {
				boost_state_changed = !cluster->boost;
				++cluster->boost;
			} else {
				if (!cluster->boost) {
					pr_err("Error turning off boost. Boost already turned off\n");
					ret = -EINVAL;
				} else {
					--cluster->boost;
					boost_state_changed = !cluster->boost;
				}
			}
			break;
		}
	}
	spin_unlock_irqrestore(&state_lock, flags);

	if (boost_state_changed)
		apply_need(cluster);

	trace_core_ctl_set_boost(cluster->boost, ret);

	return ret;
}

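/*
 * Periodic entry point (wallclock in ns, to match core_ctl_check_interval):
 * at most once per interval, refresh the run-queue averages and wake the
 * core_ctl thread of any cluster whose CPU need changed.
 */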
void core_ctl_check(u64 wallclock)
{
	if (unlikely(!initialized))
		return;

	if (do_check(wallclock)) {
		unsigned int index = 0;
		struct cluster_data *cluster;

		update_running_avg(true);

		for_each_cluster(cluster, index) {
			if (eval_need(cluster))
				wake_up_core_ctl_thread(cluster);
		}
	}
}

static void move_cpu_lru(struct cpu_data *cpu_data)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_del(&cpu_data->sib);
	list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
	spin_unlock_irqrestore(&state_lock, flags);
}

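/*
 * Walk the cluster's LRU and isolate active, non-busy CPUs until only
 * 'need' remain active; if the cluster is still above max_cpus afterwards,
 * make a second pass that isolates busy CPUs as well.
 */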
static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;

	/*
	 * Protect against entry being removed (and added at tail) by other
	 * thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus == need)
			break;
		/* Don't offline busy CPUs. */
		if (c->is_busy)
			continue;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			cluster->last_isolate_ts = ktime_to_ms(ktime_get());
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * If the number of active CPUs is within the limits, then
	 * don't force isolation of any busy CPUs.
	 */
	if (cluster->active_cpus <= cluster->max_cpus)
		return;

	num_cpus = cluster->num_cpus;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus <= cluster->max_cpus)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
			cluster->last_isolate_ts = ktime_to_ms(ktime_get());
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	spin_unlock_irqrestore(&state_lock, flags);
}

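/*
 * Unisolate CPUs that core_ctl itself isolated until 'need' CPUs are
 * active. The first pass skips not_preferred CPUs; try_to_unisolate()
 * repeats with force set if the target is still not met.
 */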
static void __try_to_unisolate(struct cluster_data *cluster,
			       unsigned int need, bool force)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;

	/*
	 * Protect against entry being removed (and added at tail) by other
	 * thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!c->isolated_by_us)
			continue;
		if ((c->online && !cpu_isolated(c->cpu)) ||
			(!force && c->not_preferred))
			continue;
		if (cluster->active_cpus == need)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to unisolate CPU%u\n", c->cpu);
		if (!sched_unisolate_cpu(c->cpu)) {
			c->isolated_by_us = false;
			move_cpu_lru(c);
		} else {
			pr_debug("Unable to unisolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
{
	bool force_use_non_preferred = false;

	__try_to_unisolate(cluster, need, force_use_non_preferred);

	if (cluster->active_cpus == need)
		return;

	force_use_non_preferred = true;
	__try_to_unisolate(cluster, need, force_use_non_preferred);
}

static void __ref do_core_ctl(struct cluster_data *cluster)
{
	unsigned int need;

	need = apply_limits(cluster, cluster->need_cpus);

	if (adjustment_possible(cluster, need)) {
		pr_debug("Trying to adjust group %u from %u to %u\n",
			 cluster->first_cpu, cluster->active_cpus, need);

		if (cluster->active_cpus > need)
			try_to_isolate(cluster, need);
		else if (cluster->active_cpus < need)
			try_to_unisolate(cluster, need);
	}
}

static int __ref try_core_ctl(void *data)
{
	struct cluster_data *cluster = data;
	unsigned long flags;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&cluster->pending_lock, flags);
		if (!cluster->pending) {
			spin_unlock_irqrestore(&cluster->pending_lock, flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&cluster->pending_lock, flags);
		}
		set_current_state(TASK_RUNNING);
		cluster->pending = false;
		spin_unlock_irqrestore(&cluster->pending_lock, flags);

		do_core_ctl(cluster);
	}

	return 0;
}

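/*
 * Hotplug notifier: keep the per-CPU online state, the cluster's active
 * count and the LRU ordering in sync, unisolate CPUs we isolated if they
 * go fully offline, and kick the core_ctl thread when an adjustment
 * becomes possible.
 */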
static int __ref cpu_callback(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	uint32_t cpu = (uintptr_t)hcpu;
	struct cpu_data *state = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = state->cluster;
	unsigned int need;
	int ret = NOTIFY_OK;

	/* Don't affect suspend resume */
	if (action & CPU_TASKS_FROZEN)
		return NOTIFY_OK;

	if (unlikely(!cluster || !cluster->inited))
		return NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:

		/* If online state of CPU somehow got out of sync, fix it. */
		if (state->online) {
			state->online = false;
			cluster->active_cpus = get_active_cpu_count(cluster);
			pr_warn("CPU%d offline when state is online\n", cpu);
		}
		break;

	case CPU_ONLINE:

		state->online = true;
		cluster->active_cpus = get_active_cpu_count(cluster);

		/*
		 * Moving to the end of the list should only happen in
		 * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an
		 * infinite list traversal when thermal (or other entities)
		 * reject trying to online CPUs.
		 */
		move_cpu_lru(state);
		break;

	case CPU_DEAD:
		/*
		 * We don't want to have a CPU both offline and isolated.
		 * So unisolate a CPU that went down if it was isolated by us.
		 */
		if (state->isolated_by_us) {
			sched_unisolate_cpu_unlocked(cpu);
			state->isolated_by_us = false;
		}

		/* Move a CPU to the end of the LRU when it goes offline. */
		move_cpu_lru(state);

		/* Fall through */

	case CPU_UP_CANCELED:

		/* If online state of CPU somehow got out of sync, fix it. */
		if (!state->online)
			pr_warn("CPU%d online when state is offline\n", cpu);

		state->online = false;
		state->busy = 0;
		cluster->active_cpus = get_active_cpu_count(cluster);
		break;
	}

	need = apply_limits(cluster, cluster->need_cpus);
	if (adjustment_possible(cluster, need))
		wake_up_core_ctl_thread(cluster);

	return ret;
}

static struct notifier_block __refdata cpu_notifier = {
	.notifier_call = cpu_callback,
};

/* ============================ init code ============================== */

static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
	unsigned int i;

	for (i = 0; i < num_clusters; ++i) {
		if (cluster_state[i].first_cpu == first_cpu)
			return &cluster_state[i];
	}

	return NULL;
}

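/*
 * Create the cluster corresponding to a cpufreq policy: set default
 * tunables, build the LRU of member CPUs, start the per-cluster
 * core_ctl/<first_cpu> thread and expose the sysfs group under the
 * first CPU's device.
 */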
static int cluster_init(const struct cpumask *mask)
{
	struct device *dev;
	unsigned int first_cpu = cpumask_first(mask);
	struct cluster_data *cluster;
	struct cpu_data *state;
	unsigned int cpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	if (find_cluster_by_first_cpu(first_cpu))
		return 0;

	dev = get_cpu_device(first_cpu);
	if (!dev)
		return -ENODEV;

	pr_info("Creating CPU group %d\n", first_cpu);

	if (num_clusters == MAX_CLUSTERS) {
		pr_err("Unsupported number of clusters. Only %u supported\n",
		       MAX_CLUSTERS);
		return -EINVAL;
	}
	cluster = &cluster_state[num_clusters];
	++num_clusters;

	cpumask_copy(&cluster->cpu_mask, mask);
	cluster->num_cpus = cpumask_weight(mask);
	if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
		pr_err("HW configuration not supported\n");
		return -EINVAL;
	}
	cluster->first_cpu = first_cpu;
	cluster->min_cpus = 1;
	cluster->max_cpus = cluster->num_cpus;
	cluster->need_cpus = cluster->num_cpus;
	cluster->offline_delay_ms = 100;
	cluster->task_thres = UINT_MAX;
	cluster->nrrun = cluster->num_cpus;
	INIT_LIST_HEAD(&cluster->lru);
	spin_lock_init(&cluster->pending_lock);

	for_each_cpu(cpu, mask) {
		pr_info("Init CPU%u state\n", cpu);

		state = &per_cpu(cpu_state, cpu);
		state->cluster = cluster;
		state->cpu = cpu;
		if (cpu_online(cpu))
			state->online = true;
		list_add_tail(&state->sib, &cluster->lru);
	}
	cluster->active_cpus = get_active_cpu_count(cluster);

	cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
					       "core_ctl/%d", first_cpu);
	if (IS_ERR(cluster->core_ctl_thread))
		return PTR_ERR(cluster->core_ctl_thread);

	sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
				   &param);

	cluster->inited = true;

	kobject_init(&cluster->kobj, &ktype_core_ctl);
	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}

static int cpufreq_policy_cb(struct notifier_block *nb, unsigned long val,
			     void *data)
{
	struct cpufreq_policy *policy = data;
	int ret;

	switch (val) {
	case CPUFREQ_CREATE_POLICY:
		ret = cluster_init(policy->related_cpus);
		if (ret)
			pr_warn("unable to create core ctl group: %d\n", ret);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cpufreq_pol_nb = {
	.notifier_call = cpufreq_policy_cb,
};

static int cpufreq_gov_cb(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	struct cpufreq_govinfo *info = data;

	switch (val) {
	case CPUFREQ_LOAD_CHANGE:
		core_ctl_set_busy(info->cpu, info->load);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cpufreq_gov_nb = {
	.notifier_call = cpufreq_gov_cb,
};

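/*
 * Late init: register the hotplug and cpufreq notifiers, then create
 * groups for the policies of all CPUs that are already online.
 */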
static int __init core_ctl_init(void)
{
	unsigned int cpu;

	core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
					* NSEC_PER_MSEC;

	register_cpu_notifier(&cpu_notifier);
	cpufreq_register_notifier(&cpufreq_pol_nb, CPUFREQ_POLICY_NOTIFIER);
	cpufreq_register_notifier(&cpufreq_gov_nb, CPUFREQ_GOVINFO_NOTIFIER);

	lock_device_hotplug();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		int ret;

		policy = cpufreq_cpu_get(cpu);
		if (policy) {
			ret = cluster_init(policy->related_cpus);
			if (ret)
				pr_warn("unable to create core ctl group: %d\n",
					ret);
			cpufreq_cpu_put(policy);
		}
	}
	unlock_device_hotplug();
	initialized = true;
	return 0;
}

late_initcall(core_ctl_init);