/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>

#include <trace/events/sched.h>

#define MAX_CPUS_PER_CLUSTER 4
#define MAX_CLUSTERS 2

struct cluster_data {
	bool inited;
	unsigned int min_cpus;
	unsigned int max_cpus;
	unsigned int offline_delay_ms;
	unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
	unsigned int active_cpus;
	unsigned int num_cpus;
	cpumask_t cpu_mask;
	unsigned int need_cpus;
	unsigned int task_thres;
	s64 need_ts;
	struct list_head lru;
	bool pending;
	spinlock_t pending_lock;
	bool is_big_cluster;
	int nrrun;
	bool nrrun_changed;
	struct task_struct *core_ctl_thread;
	unsigned int first_cpu;
	unsigned int boost;
	struct kobject kobj;
};

struct cpu_data {
	bool online;
	bool is_busy;
	unsigned int busy;
	unsigned int cpu;
	bool not_preferred;
	struct cluster_data *cluster;
	struct list_head sib;
	bool isolated_by_us;
};

static DEFINE_PER_CPU(struct cpu_data, cpu_state);
static struct cluster_data cluster_state[MAX_CLUSTERS];
static unsigned int num_clusters;

#define for_each_cluster(cluster, idx) \
	for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
		(idx)++, (cluster) = &cluster_state[idx])

static DEFINE_SPINLOCK(state_lock);
static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);

/* ========================= sysfs interface =========================== */

static ssize_t store_min_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->min_cpus = min(val, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
}

static ssize_t store_max_cpus(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	val = min(val, state->num_cpus);
	state->max_cpus = val;
	state->min_cpus = min(state->min_cpus, state->max_cpus);
	wake_up_core_ctl_thread(state);

	return count;
}

static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
}

static ssize_t store_offline_delay_ms(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->offline_delay_ms = val;
	apply_need(state);

	return count;
}

static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
}

static ssize_t store_task_thres(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	if (val < state->num_cpus)
		return -EINVAL;

	state->task_thres = val;
	apply_need(state);

	return count;
}

static ssize_t show_offline_delay_ms(const struct cluster_data *state,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
}

static ssize_t store_busy_up_thres(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_up_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_up_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_busy_down_thres(struct cluster_data *state,
					const char *buf, size_t count)
{
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	int ret, i;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != 1 && ret != state->num_cpus)
		return -EINVAL;

	if (ret == 1) {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[0];
	} else {
		for (i = 0; i < state->num_cpus; i++)
			state->busy_down_thres[i] = val[i];
	}
	apply_need(state);
	return count;
}

static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
{
	int i, count = 0;

	for (i = 0; i < state->num_cpus; i++)
		count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
				  state->busy_down_thres[i]);

	count += snprintf(buf + count, PAGE_SIZE - count, "\n");
	return count;
}

static ssize_t store_is_big_cluster(struct cluster_data *state,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;

	state->is_big_cluster = val ? 1 : 0;
	return count;
}

static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}

static ssize_t show_cpus(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry(c, &state->lru, sib) {
		count += snprintf(buf + count, PAGE_SIZE - count,
				  "CPU%u (%s)\n", c->cpu,
				  c->online ? "Online" : "Offline");
	}
	spin_unlock_irqrestore(&state_lock, flags);
	return count;
}

static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
}

static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
}

static ssize_t show_global_state(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	struct cluster_data *cluster;
	ssize_t count = 0;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		c = &per_cpu(cpu_state, cpu);
		cluster = c->cluster;
		if (!cluster || !cluster->inited)
			continue;

		count += snprintf(buf + count, PAGE_SIZE - count,
					"CPU%u\n", cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tCPU: %u\n", c->cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tOnline: %u\n", c->online);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tActive: %u\n",
					!cpu_isolated(c->cpu));
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tFirst CPU: %u\n",
					cluster->first_cpu);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tBusy%%: %u\n", c->busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tIs busy: %u\n", c->is_busy);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNot preferred: %u\n",
					c->not_preferred);
		count += snprintf(buf + count, PAGE_SIZE - count,
					"\tNr running: %u\n", cluster->nrrun);
		count += snprintf(buf + count, PAGE_SIZE - count,
			"\tActive CPUs: %u\n", get_active_cpu_count(cluster));
		count += snprintf(buf + count, PAGE_SIZE - count,
				"\tNeed CPUs: %u\n", cluster->need_cpus);
		count += snprintf(buf + count, PAGE_SIZE - count,
				"\tBoost: %u\n", (unsigned int) cluster->boost);
	}

	return count;
}

static ssize_t store_not_preferred(struct cluster_data *state,
				   const char *buf, size_t count)
{
	struct cpu_data *c;
	unsigned int i;
	unsigned int val[MAX_CPUS_PER_CLUSTER];
	unsigned long flags;
	int ret;

	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
	if (ret != state->num_cpus)
		return -EINVAL;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		c->not_preferred = val[i];
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}

static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
{
	struct cpu_data *c;
	ssize_t count = 0;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&state_lock, flags);
	for (i = 0; i < state->num_cpus; i++) {
		c = &per_cpu(cpu_state, i + state->first_cpu);
		count += scnprintf(buf + count, PAGE_SIZE - count,
				"CPU#%d: %u\n", c->cpu, c->not_preferred);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	return count;
}


struct core_ctl_attr {
	struct attribute attr;
	ssize_t (*show)(const struct cluster_data *, char *);
	ssize_t (*store)(struct cluster_data *, const char *, size_t count);
};

#define core_ctl_attr_ro(_name)		\
static struct core_ctl_attr _name =	\
__ATTR(_name, 0444, show_##_name, NULL)

#define core_ctl_attr_rw(_name)			\
static struct core_ctl_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

core_ctl_attr_rw(min_cpus);
core_ctl_attr_rw(max_cpus);
core_ctl_attr_rw(offline_delay_ms);
core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
core_ctl_attr_ro(cpus);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);

static struct attribute *default_attrs[] = {
	&min_cpus.attr,
	&max_cpus.attr,
	&offline_delay_ms.attr,
	&busy_up_thres.attr,
	&busy_down_thres.attr,
	&task_thres.attr,
	&is_big_cluster.attr,
	&cpus.attr,
	&need_cpus.attr,
	&active_cpus.attr,
	&global_state.attr,
	&not_preferred.attr,
	NULL
};

#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->show)
		ret = cattr->show(data, buf);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cluster_data *data = to_cluster_data(kobj);
	struct core_ctl_attr *cattr = to_attr(attr);
	ssize_t ret = -EIO;

	if (cattr->store)
		ret = cattr->store(data, buf, count);

	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_core_ctl = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

/* ==================== runqueue based core count =================== */

#define RQ_AVG_TOLERANCE 2
#define RQ_AVG_DEFAULT_MS 20
#define NR_RUNNING_TOLERANCE 5
static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS;

static s64 rq_avg_timestamp_ms;

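/*
 * Sample the scheduler's average nr_running statistics, rate-limited to
 * roughly one sample per rq_avg_period_ms, and refresh each cluster's
 * nrrun. A big cluster tracks the big-task average; others use the
 * overall average. When trigger_update is set, a changed nrrun
 * re-evaluates the cluster's need immediately.
 */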
static void update_running_avg(bool trigger_update)
{
	int avg, iowait_avg, big_avg, old_nrrun;
	s64 now;
	unsigned long flags;
	struct cluster_data *cluster;
	unsigned int index = 0;

	spin_lock_irqsave(&state_lock, flags);

	now = ktime_to_ms(ktime_get());
	if (now - rq_avg_timestamp_ms < rq_avg_period_ms - RQ_AVG_TOLERANCE) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}
	rq_avg_timestamp_ms = now;
	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);

	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * Round up to the next integer if the average nr running tasks
	 * is within NR_RUNNING_TOLERANCE/100 of the next integer.
	 * If normal rounding up is used, it will allow a transient task
	 * to trigger online event. By the time core is onlined, the task
	 * has finished.
	 * Rounding to closest suffers same problem because scheduler
	 * might only provide running stats per jiffy, and a transient
	 * task could skew the number for one jiffy. If core control
	 * samples every 2 jiffies, it will observe 0.5 additional running
	 * average which rounds up to 1 task.
	 */
	avg = (avg + NR_RUNNING_TOLERANCE) / 100;
	big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;

	for_each_cluster(cluster, index) {
		if (!cluster->inited)
			continue;
		old_nrrun = cluster->nrrun;
		/*
		 * Big cluster only need to take care of big tasks, but if
		 * there are not enough big cores, big tasks need to be run
		 * on little as well. Thus for little's runqueue stat, it
		 * has to use overall runqueue average, or derive what big
		 * tasks would have to be run on little. The latter approach
		 * is not easy to get given core control reacts much slower
		 * than scheduler, and can't predict scheduler's behavior.
		 */
		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
		if (cluster->nrrun != old_nrrun) {
			if (trigger_update)
				apply_need(cluster);
			else
				cluster->nrrun_changed = true;
		}
	}
	return;
}

/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
				    unsigned int new_need)
{
	/* unisolate all cores if there are enough tasks */
	if (cluster->nrrun >= cluster->task_thres)
		return cluster->num_cpus;

	/* only unisolate more cores if there are tasks to run */
	if (cluster->nrrun > new_need)
		return new_need + 1;

	return new_need;
}

/* ======================= load based core count ====================== */

static unsigned int apply_limits(const struct cluster_data *cluster,
				 unsigned int need_cpus)
{
	return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
}

static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
{
	return cluster->num_cpus -
				sched_isolate_count(&cluster->cpu_mask, true);
}

static bool is_active(const struct cpu_data *state)
{
	return state->online && !cpu_isolated(state->cpu);
}

static bool adjustment_possible(const struct cluster_data *cluster,
							unsigned int need)
{
	return (need < cluster->active_cpus || (need > cluster->active_cpus &&
			sched_isolate_count(&cluster->cpu_mask, false)));
}

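/*
 * Evaluate how many CPUs this cluster needs. A boosted cluster needs
 * max_cpus; otherwise each CPU's busy% is compared against the up/down
 * thresholds and the result is adjusted for runqueue depth and clamped
 * to the min/max limits. Returns true only when an adjustment is both
 * warranted (growth applies immediately, shrinking only after
 * offline_delay_ms) and actually possible.
 */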
static bool eval_need(struct cluster_data *cluster)
{
	unsigned long flags;
	struct cpu_data *c;
	unsigned int need_cpus = 0, last_need, thres_idx;
	int ret = 0;
	bool need_flag = false;
	unsigned int active_cpus;
	unsigned int new_need;
	s64 now;

	if (unlikely(!cluster->inited))
		return 0;

	spin_lock_irqsave(&state_lock, flags);

	if (cluster->boost) {
		need_cpus = cluster->max_cpus;
	} else {
		active_cpus = get_active_cpu_count(cluster);
		thres_idx = active_cpus ? active_cpus - 1 : 0;
		list_for_each_entry(c, &cluster->lru, sib) {
			if (c->busy >= cluster->busy_up_thres[thres_idx])
				c->is_busy = true;
			else if (c->busy < cluster->busy_down_thres[thres_idx])
				c->is_busy = false;
			need_cpus += c->is_busy;
		}
		need_cpus = apply_task_need(cluster, need_cpus);
	}
	new_need = apply_limits(cluster, need_cpus);
	need_flag = adjustment_possible(cluster, new_need);

	last_need = cluster->need_cpus;
	now = ktime_to_ms(ktime_get());

	if (new_need == last_need) {
		cluster->need_ts = now;
		spin_unlock_irqrestore(&state_lock, flags);
		return 0;
	}

	if (need_cpus > cluster->active_cpus) {
		ret = 1;
	} else if (need_cpus < cluster->active_cpus) {
		s64 elapsed = now - cluster->need_ts;

		ret = elapsed >= cluster->offline_delay_ms;
	}

	if (ret) {
		cluster->need_ts = now;
		cluster->need_cpus = new_need;
	}
	trace_core_ctl_eval_need(cluster->first_cpu, last_need, need_cpus,
				 ret && need_flag);
	spin_unlock_irqrestore(&state_lock, flags);

	return ret && need_flag;
}

static void apply_need(struct cluster_data *cluster)
{
	if (eval_need(cluster))
		wake_up_core_ctl_thread(cluster);
}

static int core_ctl_set_busy(unsigned int cpu, unsigned int busy)
{
	struct cpu_data *c = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = c->cluster;
	unsigned int old_is_busy = c->is_busy;

	if (!cluster || !cluster->inited)
		return 0;

	update_running_avg(false);
	if (c->busy == busy && !cluster->nrrun_changed)
		return 0;
	c->busy = busy;
	cluster->nrrun_changed = false;

	apply_need(cluster);
	trace_core_ctl_set_busy(cpu, busy, old_is_busy, c->is_busy);
	return 0;
}

/* ========================= core count enforcement ==================== */

static void wake_up_core_ctl_thread(struct cluster_data *cluster)
{
	unsigned long flags;

	spin_lock_irqsave(&cluster->pending_lock, flags);
	cluster->pending = true;
	spin_unlock_irqrestore(&cluster->pending_lock, flags);

	wake_up_process_no_notif(cluster->core_ctl_thread);
}

static u64 core_ctl_check_timestamp;
static u64 core_ctl_check_interval;

static bool do_check(u64 wallclock)
{
	bool do_check = false;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	if ((wallclock - core_ctl_check_timestamp) >= core_ctl_check_interval) {
		core_ctl_check_timestamp = wallclock;
		do_check = true;
	}
	spin_unlock_irqrestore(&state_lock, flags);
	return do_check;
}

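/*
 * Boost is reference counted and only applied to the cluster marked as
 * big. While the boost count is non-zero, eval_need() pins the cluster's
 * need at max_cpus. Turning boost off without a matching boost on is
 * reported as an error.
 */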
int core_ctl_set_boost(bool boost)
{
	unsigned int index = 0;
	struct cluster_data *cluster;
	unsigned long flags;
	int ret = 0;
	bool boost_state_changed = false;

	spin_lock_irqsave(&state_lock, flags);
	for_each_cluster(cluster, index) {
		if (cluster->is_big_cluster) {
			if (boost) {
				boost_state_changed = !cluster->boost;
				++cluster->boost;
			} else {
				if (!cluster->boost) {
					pr_err("Error turning off boost. Boost already turned off\n");
					ret = -EINVAL;
				} else {
					--cluster->boost;
					boost_state_changed = !cluster->boost;
				}
			}
			break;
		}
	}
	spin_unlock_irqrestore(&state_lock, flags);

	if (boost_state_changed)
		apply_need(cluster);

	trace_core_ctl_set_boost(cluster->boost, ret);

	return ret;
}
EXPORT_SYMBOL(core_ctl_set_boost);

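/*
 * Periodic entry point. The wallclock timestamp is used only for rate
 * limiting via do_check(); once a check interval has elapsed, the
 * runqueue averages are refreshed and every cluster's need is
 * re-evaluated.
 */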
void core_ctl_check(u64 wallclock)
{
	if (unlikely(!initialized))
		return;

	if (do_check(wallclock)) {
		unsigned int index = 0;
		struct cluster_data *cluster;

		update_running_avg(true);

		for_each_cluster(cluster, index) {
			if (eval_need(cluster))
				wake_up_core_ctl_thread(cluster);
		}
	}
}

static void move_cpu_lru(struct cpu_data *cpu_data)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	list_del(&cpu_data->sib);
	list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
	spin_unlock_irqrestore(&state_lock, flags);
}

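/*
 * Isolate CPUs until only 'need' of them remain active. The first pass
 * walks the LRU list and skips busy CPUs; if the cluster is still above
 * max_cpus afterwards, a second pass isolates busy CPUs as well.
 */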
static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;

	/*
	 * Protect against entry being removed (and added at tail) by other
	 * thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus == need)
			break;
		/* Don't offline busy CPUs. */
		if (c->is_busy)
			continue;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	spin_unlock_irqrestore(&state_lock, flags);

	/*
	 * If the number of active CPUs is within the limits, then
	 * don't force isolation of any busy CPUs.
	 */
	if (cluster->active_cpus <= cluster->max_cpus)
		return;

	num_cpus = cluster->num_cpus;
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!is_active(c))
			continue;
		if (cluster->active_cpus <= cluster->max_cpus)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to isolate CPU%u\n", c->cpu);
		if (!sched_isolate_cpu(c->cpu)) {
			c->isolated_by_us = true;
			move_cpu_lru(c);
		} else {
			pr_debug("Unable to isolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	spin_unlock_irqrestore(&state_lock, flags);
}

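/*
 * Bring back CPUs that core_ctl itself isolated until 'need' CPUs are
 * active. CPUs marked not_preferred are skipped unless 'force' is set.
 */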
static void __try_to_unisolate(struct cluster_data *cluster,
				unsigned int need, bool force)
{
	struct cpu_data *c, *tmp;
	unsigned long flags;
	unsigned int num_cpus = cluster->num_cpus;

	/*
	 * Protect against entry being removed (and added at tail) by other
	 * thread (hotplug).
	 */
	spin_lock_irqsave(&state_lock, flags);
	list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
		if (!num_cpus--)
			break;

		if (!c->isolated_by_us)
			continue;
		if ((c->online && !cpu_isolated(c->cpu)) ||
			(!force && c->not_preferred))
			continue;
		if (cluster->active_cpus == need)
			break;

		spin_unlock_irqrestore(&state_lock, flags);

		pr_debug("Trying to unisolate CPU%u\n", c->cpu);
		if (!sched_unisolate_cpu(c->cpu)) {
			c->isolated_by_us = false;
			move_cpu_lru(c);
		} else {
			pr_debug("Unable to unisolate CPU%u\n", c->cpu);
		}
		cluster->active_cpus = get_active_cpu_count(cluster);
		spin_lock_irqsave(&state_lock, flags);
	}
	spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
{
	bool force_use_non_preferred = false;

	__try_to_unisolate(cluster, need, force_use_non_preferred);

	if (cluster->active_cpus == need)
		return;

	force_use_non_preferred = true;
	__try_to_unisolate(cluster, need, force_use_non_preferred);
}

static void __ref do_core_ctl(struct cluster_data *cluster)
{
	unsigned int need;

	need = apply_limits(cluster, cluster->need_cpus);

	if (adjustment_possible(cluster, need)) {
		pr_debug("Trying to adjust group %u from %u to %u\n",
				cluster->first_cpu, cluster->active_cpus, need);

		if (cluster->active_cpus > need)
			try_to_isolate(cluster, need);
		else if (cluster->active_cpus < need)
			try_to_unisolate(cluster, need);
	}
}

static int __ref try_core_ctl(void *data)
{
	struct cluster_data *cluster = data;
	unsigned long flags;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&cluster->pending_lock, flags);
		if (!cluster->pending) {
			spin_unlock_irqrestore(&cluster->pending_lock, flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&cluster->pending_lock, flags);
		}
		set_current_state(TASK_RUNNING);
		cluster->pending = false;
		spin_unlock_irqrestore(&cluster->pending_lock, flags);

		do_core_ctl(cluster);
	}

	return 0;
}

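/*
 * Hotplug notifier: keep the per-CPU online state, the LRU ordering and
 * the cluster's active count in sync with CPU hotplug, and wake the
 * core_ctl thread if the resulting state allows an adjustment.
 */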
static int __ref cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	uint32_t cpu = (uintptr_t)hcpu;
	struct cpu_data *state = &per_cpu(cpu_state, cpu);
	struct cluster_data *cluster = state->cluster;
	unsigned int need;
	int ret = NOTIFY_OK;

	if (unlikely(!cluster || !cluster->inited))
		return NOTIFY_OK;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:

		/* If online state of CPU somehow got out of sync, fix it. */
		if (state->online) {
			state->online = false;
			cluster->active_cpus = get_active_cpu_count(cluster);
			pr_warn("CPU%d offline when state is online\n", cpu);
		}
		break;

	case CPU_ONLINE:

		state->online = true;
		cluster->active_cpus = get_active_cpu_count(cluster);

		/*
		 * Moving to the end of the list should only happen in
		 * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an
		 * infinite list traversal when thermal (or other entities)
		 * reject trying to online CPUs.
		 */
		move_cpu_lru(state);
		break;

	case CPU_DEAD:
		/*
		 * We don't want to have a CPU both offline and isolated.
		 * So unisolate a CPU that went down if it was isolated by us.
		 */
		if (state->isolated_by_us) {
			sched_unisolate_cpu_unlocked(cpu);
			state->isolated_by_us = false;
		}

		/* Move a CPU to the end of the LRU when it goes offline. */
		move_cpu_lru(state);

		/* Fall through */

	case CPU_UP_CANCELED:

		/* If online state of CPU somehow got out of sync, fix it. */
		if (!state->online)
			pr_warn("CPU%d online when state is offline\n", cpu);

		state->online = false;
		state->busy = 0;
		cluster->active_cpus = get_active_cpu_count(cluster);
		break;
	}

	need = apply_limits(cluster, cluster->need_cpus);
	if (adjustment_possible(cluster, need))
		wake_up_core_ctl_thread(cluster);

	return ret;
}

static struct notifier_block __refdata cpu_notifier = {
	.notifier_call = cpu_callback,
};

/* ============================ init code ============================== */

static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
	unsigned int i;

	for (i = 0; i < num_clusters; ++i) {
		if (cluster_state[i].first_cpu == first_cpu)
			return &cluster_state[i];
	}

	return NULL;
}

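/*
 * Create a cluster for the CPUs covered by one cpufreq policy: apply the
 * tunable defaults, build the LRU of member CPUs, spawn the SCHED_FIFO
 * core_ctl thread and expose the "core_ctl" sysfs node under the first
 * CPU's device.
 */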
static int cluster_init(const struct cpumask *mask)
{
	struct device *dev;
	unsigned int first_cpu = cpumask_first(mask);
	struct cluster_data *cluster;
	struct cpu_data *state;
	unsigned int cpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	if (find_cluster_by_first_cpu(first_cpu))
		return 0;

	dev = get_cpu_device(first_cpu);
	if (!dev)
		return -ENODEV;

	pr_info("Creating CPU group %d\n", first_cpu);

	if (num_clusters == MAX_CLUSTERS) {
		pr_err("Unsupported number of clusters. Only %u supported\n",
								MAX_CLUSTERS);
		return -EINVAL;
	}
	cluster = &cluster_state[num_clusters];
	++num_clusters;

	cpumask_copy(&cluster->cpu_mask, mask);
	cluster->num_cpus = cpumask_weight(mask);
	if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
		pr_err("HW configuration not supported\n");
		return -EINVAL;
	}
	cluster->first_cpu = first_cpu;
	cluster->min_cpus = 1;
	cluster->max_cpus = cluster->num_cpus;
	cluster->need_cpus = cluster->num_cpus;
	cluster->offline_delay_ms = 100;
	cluster->task_thres = UINT_MAX;
	cluster->nrrun = cluster->num_cpus;
	INIT_LIST_HEAD(&cluster->lru);
	spin_lock_init(&cluster->pending_lock);

	for_each_cpu(cpu, mask) {
		pr_info("Init CPU%u state\n", cpu);

		state = &per_cpu(cpu_state, cpu);
		state->cluster = cluster;
		state->cpu = cpu;
		if (cpu_online(cpu))
			state->online = true;
		list_add_tail(&state->sib, &cluster->lru);
	}
	cluster->active_cpus = get_active_cpu_count(cluster);

	cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
					"core_ctl/%d", first_cpu);
	if (IS_ERR(cluster->core_ctl_thread))
		return PTR_ERR(cluster->core_ctl_thread);

	sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
				   &param);

	cluster->inited = true;

	kobject_init(&cluster->kobj, &ktype_core_ctl);
	return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}

static int cpufreq_policy_cb(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_policy *policy = data;
	int ret;

	switch (val) {
	case CPUFREQ_CREATE_POLICY:
		ret = cluster_init(policy->related_cpus);
		if (ret)
			pr_warn("unable to create core ctl group: %d\n", ret);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cpufreq_pol_nb = {
	.notifier_call = cpufreq_policy_cb,
};

static int cpufreq_gov_cb(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_govinfo *info = data;

	switch (val) {
	case CPUFREQ_LOAD_CHANGE:
		core_ctl_set_busy(info->cpu, info->load);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cpufreq_gov_nb = {
	.notifier_call = cpufreq_gov_cb,
};

static int __init core_ctl_init(void)
{
	unsigned int cpu;

	core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
					* NSEC_PER_MSEC;

	register_cpu_notifier(&cpu_notifier);
	cpufreq_register_notifier(&cpufreq_pol_nb, CPUFREQ_POLICY_NOTIFIER);
	cpufreq_register_notifier(&cpufreq_gov_nb, CPUFREQ_GOVINFO_NOTIFIER);

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		int ret;

		policy = cpufreq_cpu_get(cpu);
		if (policy) {
			ret = cluster_init(policy->related_cpus);
			if (ret)
				pr_warn("unable to create core ctl group: %d\n"
					, ret);
			cpufreq_cpu_put(policy);
		}
	}
	cpu_maps_update_done();
	initialized = true;
	return 0;
}

late_initcall(core_ctl_init);