/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "core_ctl: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>

#include <trace/events/sched.h>

#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 2

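/*
 * Per-cluster state and tunables. One instance is initialized per CPU
 * cluster at boot and exposed through the per-cluster sysfs directory
 * created in cluster_init().
 */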
struct cluster_data {
        bool inited;
        unsigned int min_cpus;
        unsigned int max_cpus;
        unsigned int offline_delay_ms;
        unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER];
        unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
        unsigned int active_cpus;
        unsigned int num_cpus;
        unsigned int nr_isolated_cpus;
        cpumask_t cpu_mask;
        unsigned int need_cpus;
        unsigned int task_thres;
        unsigned int max_nr;
        s64 need_ts;
        struct list_head lru;
        bool pending;
        spinlock_t pending_lock;
        bool is_big_cluster;
        bool enable;
        int nrrun;
        struct task_struct *core_ctl_thread;
        unsigned int first_cpu;
        unsigned int boost;
        struct kobject kobj;
};

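/* Per-CPU state, kept on its cluster's LRU list via @sib. */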
struct cpu_data {
        bool is_busy;
        unsigned int busy;
        unsigned int cpu;
        bool not_preferred;
        struct cluster_data *cluster;
        struct list_head sib;
        bool isolated_by_us;
};

static DEFINE_PER_CPU(struct cpu_data, cpu_state);
static struct cluster_data cluster_state[MAX_CLUSTERS];
static unsigned int num_clusters;

#define for_each_cluster(cluster, idx) \
        for ((cluster) = &cluster_state[idx]; (idx) < num_clusters;\
                (idx)++, (cluster) = &cluster_state[idx])

static DEFINE_SPINLOCK(state_lock);
static void apply_need(struct cluster_data *state);
static void wake_up_core_ctl_thread(struct cluster_data *state);
static bool initialized;

static unsigned int get_active_cpu_count(const struct cluster_data *cluster);

/* ========================= sysfs interface =========================== */

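/*
 * Each cluster's kobject is added under the device of its first CPU
 * (see cluster_init()), so the knobs should appear as
 * /sys/devices/system/cpu/cpuN/core_ctl/. For example, assuming the
 * first cluster starts at CPU0:
 *
 *   echo 2 > /sys/devices/system/cpu/cpu0/core_ctl/min_cpus
 *   cat /sys/devices/system/cpu/cpu0/core_ctl/global_state
 */
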
static ssize_t store_min_cpus(struct cluster_data *state,
                                const char *buf, size_t count)
{
        unsigned int val;

        if (sscanf(buf, "%u\n", &val) != 1)
                return -EINVAL;

        state->min_cpus = min(val, state->max_cpus);
        wake_up_core_ctl_thread(state);

        return count;
}

static ssize_t show_min_cpus(const struct cluster_data *state, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus);
}

static ssize_t store_max_cpus(struct cluster_data *state,
                                const char *buf, size_t count)
{
        unsigned int val;

        if (sscanf(buf, "%u\n", &val) != 1)
                return -EINVAL;

        val = min(val, state->num_cpus);
        state->max_cpus = val;
        state->min_cpus = min(state->min_cpus, state->max_cpus);
        wake_up_core_ctl_thread(state);

        return count;
}

static ssize_t show_max_cpus(const struct cluster_data *state, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus);
}

static ssize_t store_offline_delay_ms(struct cluster_data *state,
                                const char *buf, size_t count)
{
        unsigned int val;

        if (sscanf(buf, "%u\n", &val) != 1)
                return -EINVAL;

        state->offline_delay_ms = val;
        apply_need(state);

        return count;
}

static ssize_t show_task_thres(const struct cluster_data *state, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres);
}

static ssize_t store_task_thres(struct cluster_data *state,
                                const char *buf, size_t count)
{
        unsigned int val;

        if (sscanf(buf, "%u\n", &val) != 1)
                return -EINVAL;

        if (val < state->num_cpus)
                return -EINVAL;

        state->task_thres = val;
        apply_need(state);

        return count;
}

static ssize_t show_offline_delay_ms(const struct cluster_data *state,
                                     char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms);
}

static ssize_t store_busy_up_thres(struct cluster_data *state,
                                const char *buf, size_t count)
{
        unsigned int val[MAX_CPUS_PER_CLUSTER];
        int ret, i;

        ret = sscanf(buf, "%u %u %u %u %u %u\n",
                        &val[0], &val[1], &val[2], &val[3],
                        &val[4], &val[5]);
        if (ret != 1 && ret != state->num_cpus)
                return -EINVAL;

        if (ret == 1) {
                for (i = 0; i < state->num_cpus; i++)
                        state->busy_up_thres[i] = val[0];
        } else {
                for (i = 0; i < state->num_cpus; i++)
                        state->busy_up_thres[i] = val[i];
        }
        apply_need(state);
        return count;
}

static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf)
{
        int i, count = 0;

        for (i = 0; i < state->num_cpus; i++)
                count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
                                  state->busy_up_thres[i]);

        count += snprintf(buf + count, PAGE_SIZE - count, "\n");
        return count;
}

static ssize_t store_busy_down_thres(struct cluster_data *state,
                                const char *buf, size_t count)
{
        unsigned int val[MAX_CPUS_PER_CLUSTER];
        int ret, i;

        ret = sscanf(buf, "%u %u %u %u %u %u\n",
                        &val[0], &val[1], &val[2], &val[3],
                        &val[4], &val[5]);
        if (ret != 1 && ret != state->num_cpus)
                return -EINVAL;

        if (ret == 1) {
                for (i = 0; i < state->num_cpus; i++)
                        state->busy_down_thres[i] = val[0];
        } else {
                for (i = 0; i < state->num_cpus; i++)
                        state->busy_down_thres[i] = val[i];
        }
        apply_need(state);
        return count;
}

static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf)
{
        int i, count = 0;

        for (i = 0; i < state->num_cpus; i++)
                count += snprintf(buf + count, PAGE_SIZE - count, "%u ",
                                  state->busy_down_thres[i]);

        count += snprintf(buf + count, PAGE_SIZE - count, "\n");
        return count;
}

static ssize_t store_is_big_cluster(struct cluster_data *state,
                                const char *buf, size_t count)
{
        unsigned int val;

        if (sscanf(buf, "%u\n", &val) != 1)
                return -EINVAL;

        state->is_big_cluster = val ? 1 : 0;
        return count;
}

static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}

static ssize_t store_enable(struct cluster_data *state,
                                const char *buf, size_t count)
{
        unsigned int val;
        bool bval;

        if (sscanf(buf, "%u\n", &val) != 1)
                return -EINVAL;

        bval = !!val;
        if (bval != state->enable) {
                state->enable = bval;
                apply_need(state);
        }

        return count;
}

static ssize_t show_enable(const struct cluster_data *state, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%u\n", state->enable);
}

static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
}

static ssize_t show_active_cpus(const struct cluster_data *state, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus);
}

static ssize_t show_global_state(const struct cluster_data *state, char *buf)
{
        struct cpu_data *c;
        struct cluster_data *cluster;
        ssize_t count = 0;
        unsigned int cpu;

        spin_lock_irq(&state_lock);
        for_each_possible_cpu(cpu) {
                c = &per_cpu(cpu_state, cpu);
                cluster = c->cluster;
                if (!cluster || !cluster->inited)
                        continue;

                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "CPU%u\n", cpu);
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tCPU: %u\n", c->cpu);
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tOnline: %u\n",
                                        cpu_online(c->cpu));
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tIsolated: %u\n",
                                        cpu_isolated(c->cpu));
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tFirst CPU: %u\n",
                                        cluster->first_cpu);
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tBusy%%: %u\n", c->busy);
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tIs busy: %u\n", c->is_busy);
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tNot preferred: %u\n",
                                        c->not_preferred);
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tNr running: %u\n", cluster->nrrun);
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tActive CPUs: %u\n", get_active_cpu_count(cluster));
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tNeed CPUs: %u\n", cluster->need_cpus);
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tNr isolated CPUs: %u\n",
                                        cluster->nr_isolated_cpus);
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tBoost: %u\n", (unsigned int) cluster->boost);
        }
        spin_unlock_irq(&state_lock);

        return count;
}

static ssize_t store_not_preferred(struct cluster_data *state,
                                   const char *buf, size_t count)
{
        struct cpu_data *c;
        unsigned int i;
        unsigned int val[MAX_CPUS_PER_CLUSTER];
        unsigned long flags;
        int ret;

        ret = sscanf(buf, "%u %u %u %u %u %u\n",
                        &val[0], &val[1], &val[2], &val[3],
                        &val[4], &val[5]);
        if (ret != state->num_cpus)
                return -EINVAL;

        spin_lock_irqsave(&state_lock, flags);
        for (i = 0; i < state->num_cpus; i++) {
                c = &per_cpu(cpu_state, i + state->first_cpu);
                c->not_preferred = val[i];
        }
        spin_unlock_irqrestore(&state_lock, flags);

        return count;
}

static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
{
        struct cpu_data *c;
        ssize_t count = 0;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&state_lock, flags);
        for (i = 0; i < state->num_cpus; i++) {
                c = &per_cpu(cpu_state, i + state->first_cpu);
                count += scnprintf(buf + count, PAGE_SIZE - count,
                                "CPU#%d: %u\n", c->cpu, c->not_preferred);
        }
        spin_unlock_irqrestore(&state_lock, flags);

        return count;
}

struct core_ctl_attr {
        struct attribute attr;
        ssize_t (*show)(const struct cluster_data *, char *);
        ssize_t (*store)(struct cluster_data *, const char *, size_t count);
};

#define core_ctl_attr_ro(_name) \
static struct core_ctl_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define core_ctl_attr_rw(_name) \
static struct core_ctl_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

core_ctl_attr_rw(min_cpus);
core_ctl_attr_rw(max_cpus);
core_ctl_attr_rw(offline_delay_ms);
core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);
core_ctl_attr_rw(enable);

static struct attribute *default_attrs[] = {
        &min_cpus.attr,
        &max_cpus.attr,
        &offline_delay_ms.attr,
        &busy_up_thres.attr,
        &busy_down_thres.attr,
        &task_thres.attr,
        &is_big_cluster.attr,
        &enable.attr,
        &need_cpus.attr,
        &active_cpus.attr,
        &global_state.attr,
        &not_preferred.attr,
        NULL
};

#define to_cluster_data(k) container_of(k, struct cluster_data, kobj)
#define to_attr(a) container_of(a, struct core_ctl_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cluster_data *data = to_cluster_data(kobj);
        struct core_ctl_attr *cattr = to_attr(attr);
        ssize_t ret = -EIO;

        if (cattr->show)
                ret = cattr->show(data, buf);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cluster_data *data = to_cluster_data(kobj);
        struct core_ctl_attr *cattr = to_attr(attr);
        ssize_t ret = -EIO;

        if (cattr->store)
                ret = cattr->store(data, buf, count);

        return ret;
}

static const struct sysfs_ops sysfs_ops = {
        .show = show,
        .store = store,
};

static struct kobj_type ktype_core_ctl = {
        .sysfs_ops = &sysfs_ops,
        .default_attrs = default_attrs,
};

/* ==================== runqueue based core count =================== */

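/*
 * Pull fresh run-queue statistics from the scheduler: nrrun is the
 * average number of runnable tasks over the last window (the big-task
 * average for a big cluster), and max_nr appears to be the deepest
 * per-CPU run queue seen in that window (compared against
 * MAX_NR_THRESHOLD below).
 */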
static void update_running_avg(void)
{
        int avg, iowait_avg, big_avg;
        int max_nr, big_max_nr;
        struct cluster_data *cluster;
        unsigned int index = 0;

        sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
                                 &max_nr, &big_max_nr);

        for_each_cluster(cluster, index) {
                if (!cluster->inited)
                        continue;
                cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
                cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr;
        }
}

#define MAX_NR_THRESHOLD 4
/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
                                    unsigned int new_need)
{
        /* unisolate all cores if there are enough tasks */
        if (cluster->nrrun >= cluster->task_thres)
                return cluster->num_cpus;

        /* only unisolate more cores if there are tasks to run */
        if (cluster->nrrun > new_need)
                new_need = new_need + 1;

        /*
         * We don't want tasks to be overcrowded in a cluster.
         * If any CPU has more than MAX_NR_THRESHOLD in the last
         * window, bring another CPU to help out.
         */
        if (cluster->max_nr > MAX_NR_THRESHOLD)
                new_need = new_need + 1;

        return new_need;
}

/* ======================= load based core count ====================== */

static unsigned int apply_limits(const struct cluster_data *cluster,
                                 unsigned int need_cpus)
{
        return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus);
}

static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
{
        return cluster->num_cpus -
                sched_isolate_count(&cluster->cpu_mask, true);
}

static bool is_active(const struct cpu_data *state)
{
        return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
}

static bool adjustment_possible(const struct cluster_data *cluster,
                                unsigned int need)
{
        return (need < cluster->active_cpus || (need > cluster->active_cpus &&
                                                cluster->nr_isolated_cpus));
}

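/*
 * Recompute this cluster's CPU need. Per-CPU busy% is compared against
 * the up/down thresholds to set is_busy, the run-queue based adjustment
 * is applied, and the result is clamped to [min_cpus, max_cpus].
 * Increases in need take effect immediately; decreases are deferred
 * until the lower need has persisted for offline_delay_ms.
 */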
static bool eval_need(struct cluster_data *cluster)
{
        unsigned long flags;
        struct cpu_data *c;
        unsigned int need_cpus = 0, last_need, thres_idx;
        int ret = 0;
        bool need_flag = false;
        unsigned int new_need;
        s64 now, elapsed;

        if (unlikely(!cluster->inited))
                return 0;

        spin_lock_irqsave(&state_lock, flags);

        if (cluster->boost || !cluster->enable) {
                need_cpus = cluster->max_cpus;
        } else {
                cluster->active_cpus = get_active_cpu_count(cluster);
                thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
                list_for_each_entry(c, &cluster->lru, sib) {
                        if (c->busy >= cluster->busy_up_thres[thres_idx])
                                c->is_busy = true;
                        else if (c->busy < cluster->busy_down_thres[thres_idx])
                                c->is_busy = false;
                        need_cpus += c->is_busy;
                }
                need_cpus = apply_task_need(cluster, need_cpus);
        }
        new_need = apply_limits(cluster, need_cpus);
        need_flag = adjustment_possible(cluster, new_need);

        last_need = cluster->need_cpus;
        now = ktime_to_ms(ktime_get());

        if (new_need > cluster->active_cpus) {
                ret = 1;
        } else {
                if (new_need == last_need) {
                        cluster->need_ts = now;
                        spin_unlock_irqrestore(&state_lock, flags);
                        return 0;
                }

                elapsed = now - cluster->need_ts;
                ret = elapsed >= cluster->offline_delay_ms;
        }

        if (ret) {
                cluster->need_ts = now;
                cluster->need_cpus = new_need;
        }
        trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
                                 ret && need_flag);
        spin_unlock_irqrestore(&state_lock, flags);

        return ret && need_flag;
}

static void apply_need(struct cluster_data *cluster)
{
        if (eval_need(cluster))
                wake_up_core_ctl_thread(cluster);
}

static void core_ctl_set_busy(struct cpu_data *c, unsigned int busy)
{
        unsigned int old_is_busy = c->is_busy;

        if (c->busy == busy)
                return;

        c->busy = busy;
        trace_core_ctl_set_busy(c->cpu, busy, old_is_busy, c->is_busy);
}

/* ========================= core count enforcement ==================== */

static void wake_up_core_ctl_thread(struct cluster_data *cluster)
{
        unsigned long flags;

        spin_lock_irqsave(&cluster->pending_lock, flags);
        cluster->pending = true;
        spin_unlock_irqrestore(&cluster->pending_lock, flags);
        wake_up_process(cluster->core_ctl_thread);
}

static u64 core_ctl_check_timestamp;

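/*
 * Boost is a counter: every core_ctl_set_boost(true) should be paired
 * with a core_ctl_set_boost(false). While the count is non-zero,
 * eval_need() pins the big cluster's need at max_cpus.
 */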
int core_ctl_set_boost(bool boost)
{
        unsigned int index = 0;
        struct cluster_data *cluster;
        unsigned long flags;
        int ret = 0;
        bool boost_state_changed = false;

        if (unlikely(!initialized))
                return 0;

        spin_lock_irqsave(&state_lock, flags);
        for_each_cluster(cluster, index) {
                if (cluster->is_big_cluster) {
                        if (boost) {
                                boost_state_changed = !cluster->boost;
                                ++cluster->boost;
                        } else {
                                if (!cluster->boost) {
                                        pr_err("Error turning off boost. Boost already turned off\n");
                                        ret = -EINVAL;
                                } else {
                                        --cluster->boost;
                                        boost_state_changed = !cluster->boost;
                                }
                        }
                        break;
                }
        }
        spin_unlock_irqrestore(&state_lock, flags);

        if (boost_state_changed)
                apply_need(cluster);

        trace_core_ctl_set_boost(cluster->boost, ret);

        return ret;
}
EXPORT_SYMBOL(core_ctl_set_boost);

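/*
 * Scheduler entry point, expected at most once per scheduler window
 * (repeat calls for the same window_start return early): refresh each
 * CPU's busy%, update the run-queue averages, and kick the per-cluster
 * thread wherever an adjustment is needed and possible.
 */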
void core_ctl_check(u64 window_start)
{
        int cpu;
        unsigned int busy;
        struct cpu_data *c;
        struct cluster_data *cluster;
        unsigned int index = 0;

        if (unlikely(!initialized))
                return;

        if (window_start == core_ctl_check_timestamp)
                return;

        core_ctl_check_timestamp = window_start;

        for_each_possible_cpu(cpu) {

                c = &per_cpu(cpu_state, cpu);
                cluster = c->cluster;

                if (!cluster || !cluster->inited)
                        continue;

                busy = sched_get_cpu_util(cpu);
                core_ctl_set_busy(c, busy);
        }

        update_running_avg();

        for_each_cluster(cluster, index) {
                if (eval_need(cluster))
                        wake_up_core_ctl_thread(cluster);
        }
}

static void move_cpu_lru(struct cpu_data *cpu_data)
{
        unsigned long flags;

        spin_lock_irqsave(&state_lock, flags);
        list_del(&cpu_data->sib);
        list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru);
        spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
        struct cpu_data *c, *tmp;
        unsigned long flags;
        unsigned int num_cpus = cluster->num_cpus;
        unsigned int nr_isolated = 0;

        /*
         * Protect against entry being removed (and added at tail) by other
         * thread (hotplug).
         */
        spin_lock_irqsave(&state_lock, flags);
        list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
                if (!num_cpus--)
                        break;

                if (!is_active(c))
                        continue;
                if (cluster->active_cpus == need)
                        break;
                /* Don't offline busy CPUs. */
                if (c->is_busy)
                        continue;

                spin_unlock_irqrestore(&state_lock, flags);

                pr_debug("Trying to isolate CPU%u\n", c->cpu);
                if (!sched_isolate_cpu(c->cpu)) {
                        c->isolated_by_us = true;
                        move_cpu_lru(c);
                        nr_isolated++;
                } else {
                        pr_debug("Unable to isolate CPU%u\n", c->cpu);
                }
                cluster->active_cpus = get_active_cpu_count(cluster);
                spin_lock_irqsave(&state_lock, flags);
        }
        cluster->nr_isolated_cpus += nr_isolated;
        spin_unlock_irqrestore(&state_lock, flags);

        /*
         * If the number of active CPUs is within the limits, then
         * don't force isolation of any busy CPUs.
         */
        if (cluster->active_cpus <= cluster->max_cpus)
                return;

        nr_isolated = 0;
        num_cpus = cluster->num_cpus;
        spin_lock_irqsave(&state_lock, flags);
        list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
                if (!num_cpus--)
                        break;

                if (!is_active(c))
                        continue;
                if (cluster->active_cpus <= cluster->max_cpus)
                        break;

                spin_unlock_irqrestore(&state_lock, flags);

                pr_debug("Trying to isolate CPU%u\n", c->cpu);
                if (!sched_isolate_cpu(c->cpu)) {
                        c->isolated_by_us = true;
                        move_cpu_lru(c);
                        nr_isolated++;
                } else {
                        pr_debug("Unable to isolate CPU%u\n", c->cpu);
                }
                cluster->active_cpus = get_active_cpu_count(cluster);
                spin_lock_irqsave(&state_lock, flags);
        }
        cluster->nr_isolated_cpus += nr_isolated;
        spin_unlock_irqrestore(&state_lock, flags);
}

static void __try_to_unisolate(struct cluster_data *cluster,
                               unsigned int need, bool force)
{
        struct cpu_data *c, *tmp;
        unsigned long flags;
        unsigned int num_cpus = cluster->num_cpus;
        unsigned int nr_unisolated = 0;

        /*
         * Protect against entry being removed (and added at tail) by other
         * thread (hotplug).
         */
        spin_lock_irqsave(&state_lock, flags);
        list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
                if (!num_cpus--)
                        break;

                if (!c->isolated_by_us)
                        continue;
                if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
                        (!force && c->not_preferred))
                        continue;
                if (cluster->active_cpus == need)
                        break;

                spin_unlock_irqrestore(&state_lock, flags);

                pr_debug("Trying to unisolate CPU%u\n", c->cpu);
                if (!sched_unisolate_cpu(c->cpu)) {
                        c->isolated_by_us = false;
                        move_cpu_lru(c);
                        nr_unisolated++;
                } else {
                        pr_debug("Unable to unisolate CPU%u\n", c->cpu);
                }
                cluster->active_cpus = get_active_cpu_count(cluster);
                spin_lock_irqsave(&state_lock, flags);
        }
        cluster->nr_isolated_cpus -= nr_unisolated;
        spin_unlock_irqrestore(&state_lock, flags);
}

static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)
{
        bool force_use_non_preferred = false;

        __try_to_unisolate(cluster, need, force_use_non_preferred);

        if (cluster->active_cpus == need)
                return;

        force_use_non_preferred = true;
        __try_to_unisolate(cluster, need, force_use_non_preferred);
}

static void __ref do_core_ctl(struct cluster_data *cluster)
{
        unsigned int need;

        need = apply_limits(cluster, cluster->need_cpus);

        if (adjustment_possible(cluster, need)) {
                pr_debug("Trying to adjust group %u from %u to %u\n",
                                cluster->first_cpu, cluster->active_cpus, need);

                if (cluster->active_cpus > need)
                        try_to_isolate(cluster, need);
                else if (cluster->active_cpus < need)
                        try_to_unisolate(cluster, need);
        }
}

static int __ref try_core_ctl(void *data)
{
        struct cluster_data *cluster = data;
        unsigned long flags;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&cluster->pending_lock, flags);
                if (!cluster->pending) {
                        spin_unlock_irqrestore(&cluster->pending_lock, flags);
                        schedule();
                        if (kthread_should_stop())
                                break;
                        spin_lock_irqsave(&cluster->pending_lock, flags);
                }
                set_current_state(TASK_RUNNING);
                cluster->pending = false;
                spin_unlock_irqrestore(&cluster->pending_lock, flags);

                do_core_ctl(cluster);
        }

        return 0;
}

static int isolation_cpuhp_state(unsigned int cpu, bool online)
{
        struct cpu_data *state = &per_cpu(cpu_state, cpu);
        struct cluster_data *cluster = state->cluster;
        unsigned int need;
        bool do_wakeup = false, unisolated = false;
        unsigned long flags;

        if (unlikely(!cluster || !cluster->inited))
                return 0;

        if (online) {
                cluster->active_cpus = get_active_cpu_count(cluster);

                /*
                 * Moving to the end of the list should only happen in
                 * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an
                 * infinite list traversal when thermal (or other entities)
                 * reject trying to online CPUs.
                 */
                move_cpu_lru(state);
        } else {
                /*
                 * We don't want to have a CPU both offline and isolated.
                 * So unisolate a CPU that went down if it was isolated by us.
                 */
                if (state->isolated_by_us) {
                        sched_unisolate_cpu_unlocked(cpu);
                        state->isolated_by_us = false;
                        unisolated = true;
                }

                /* Move a CPU to the end of the LRU when it goes offline. */
                move_cpu_lru(state);

                state->busy = 0;
                cluster->active_cpus = get_active_cpu_count(cluster);
        }

        need = apply_limits(cluster, cluster->need_cpus);
        spin_lock_irqsave(&state_lock, flags);
        if (unisolated)
                cluster->nr_isolated_cpus--;
        do_wakeup = adjustment_possible(cluster, need);
        spin_unlock_irqrestore(&state_lock, flags);
        if (do_wakeup)
                wake_up_core_ctl_thread(cluster);

        return 0;
}

static int core_ctl_isolation_online_cpu(unsigned int cpu)
{
        return isolation_cpuhp_state(cpu, true);
}

static int core_ctl_isolation_dead_cpu(unsigned int cpu)
{
        return isolation_cpuhp_state(cpu, false);
}

/* ============================ init code ============================== */

static cpumask_var_t core_ctl_disable_cpumask;
static bool core_ctl_disable_cpumask_present;

static int __init core_ctl_disable_setup(char *str)
{
        if (!*str)
                return -EINVAL;

        alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);

        if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
                free_bootmem_cpumask_var(core_ctl_disable_cpumask);
                return -EINVAL;
        }

        core_ctl_disable_cpumask_present = true;
        pr_info("disable_cpumask=%*pbl\n",
                        cpumask_pr_args(core_ctl_disable_cpumask));

        return 0;
}
early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);
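
/*
 * For example, booting with "core_ctl_disable_cpumask=4-7" should keep
 * core_ctl away from a cluster made up of CPUs 4-7; per should_skip()
 * below, the mask must cover every CPU of a cluster to disable it.
 */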

static bool should_skip(const struct cpumask *mask)
{
        if (!core_ctl_disable_cpumask_present)
                return false;

        /*
         * We operate on a cluster basis. Disable core_ctl for a
         * cluster if all of its cpus are specified in
         * core_ctl_disable_cpumask.
         */
        return cpumask_subset(mask, core_ctl_disable_cpumask);
}

static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
        unsigned int i;

        for (i = 0; i < num_clusters; ++i) {
                if (cluster_state[i].first_cpu == first_cpu)
                        return &cluster_state[i];
        }

        return NULL;
}

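/*
 * Set up state for a new cluster: copy its cpumask, apply defaults
 * (min_cpus = 1, max_cpus = all, offline_delay_ms = 100), put every
 * CPU on the LRU list, spawn the SCHED_FIFO core_ctl thread, and
 * register the per-cluster sysfs directory.
 */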
static int cluster_init(const struct cpumask *mask)
{
        struct device *dev;
        unsigned int first_cpu = cpumask_first(mask);
        struct cluster_data *cluster;
        struct cpu_data *state;
        unsigned int cpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        if (should_skip(mask))
                return 0;

        if (find_cluster_by_first_cpu(first_cpu))
                return 0;

        dev = get_cpu_device(first_cpu);
        if (!dev)
                return -ENODEV;

        pr_info("Creating CPU group %d\n", first_cpu);

        if (num_clusters == MAX_CLUSTERS) {
                pr_err("Unsupported number of clusters. Only %u supported\n",
                                MAX_CLUSTERS);
                return -EINVAL;
        }
        cluster = &cluster_state[num_clusters];
        ++num_clusters;

        cpumask_copy(&cluster->cpu_mask, mask);
        cluster->num_cpus = cpumask_weight(mask);
        if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) {
                pr_err("HW configuration not supported\n");
                return -EINVAL;
        }
        cluster->first_cpu = first_cpu;
        cluster->min_cpus = 1;
        cluster->max_cpus = cluster->num_cpus;
        cluster->need_cpus = cluster->num_cpus;
        cluster->offline_delay_ms = 100;
        cluster->task_thres = UINT_MAX;
        cluster->nrrun = cluster->num_cpus;
        cluster->enable = true;
        INIT_LIST_HEAD(&cluster->lru);
        spin_lock_init(&cluster->pending_lock);

        for_each_cpu(cpu, mask) {
                pr_info("Init CPU%u state\n", cpu);

                state = &per_cpu(cpu_state, cpu);
                state->cluster = cluster;
                state->cpu = cpu;
                list_add_tail(&state->sib, &cluster->lru);
        }
        cluster->active_cpus = get_active_cpu_count(cluster);

        cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster,
                                        "core_ctl/%d", first_cpu);
        if (IS_ERR(cluster->core_ctl_thread))
                return PTR_ERR(cluster->core_ctl_thread);

        sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO,
                                   &param);

        cluster->inited = true;

        kobject_init(&cluster->kobj, &ktype_core_ctl);
        return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}

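/*
 * Walk the possible CPUs once per coregroup, creating a cluster for
 * each, and register the hotplug callbacks that keep isolation state
 * consistent across CPU online/offline transitions.
 */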
static int __init core_ctl_init(void)
{
        unsigned int cpu;
        struct cpumask cpus = *cpu_possible_mask;

        if (should_skip(cpu_possible_mask))
                return 0;

        cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                        "core_ctl/isolation:online",
                        core_ctl_isolation_online_cpu, NULL);

        cpuhp_setup_state_nocalls(CPUHP_CORE_CTL_ISOLATION_DEAD,
                        "core_ctl/isolation:dead",
                        NULL, core_ctl_isolation_dead_cpu);

        for_each_cpu(cpu, &cpus) {
                int ret;
                const struct cpumask *cluster_cpus = cpu_coregroup_mask(cpu);

                ret = cluster_init(cluster_cpus);
                if (ret)
                        pr_warn("unable to create core ctl group: %d\n", ret);
                cpumask_andnot(&cpus, &cpus, cluster_cpus);
        }
        initialized = true;
        return 0;
}

late_initcall(core_ctl_init);