#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "sched.h"
#include "tune.h"

#ifdef CONFIG_CGROUP_SCHEDTUNE
bool schedtune_initialized = false;
#endif

unsigned int sysctl_sched_cfs_boost __read_mostly;

extern struct reciprocal_value schedtune_spc_rdiv;
struct target_nrg schedtune_target_nrg;

/* Performance Boost region (B) threshold params */
static int perf_boost_idx;

/* Performance Constraint region (C) threshold params */
static int perf_constrain_idx;

/**
 * Performance-Energy (P-E) Space threshold constants
 */
struct threshold_params {
	int nrg_gain;
	int cap_gain;
};

/*
 * System-specific P-E space threshold constants
 */
static struct threshold_params
threshold_gains[] = {
	{ 0, 5 }, /*   < 10% */
	{ 1, 5 }, /*   < 20% */
	{ 2, 5 }, /*   < 30% */
	{ 3, 5 }, /*   < 40% */
	{ 4, 5 }, /*   < 50% */
	{ 5, 4 }, /*   < 60% */
	{ 5, 3 }, /*   < 70% */
	{ 5, 2 }, /*   < 80% */
	{ 5, 1 }, /*   < 90% */
	{ 5, 0 }  /* <= 100% */
};

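/*
 * Illustrative reading of the table above (added example, not used by the
 * code): a 25% boost maps to threshold index clamp(25, 0, 99) / 10 = 2,
 * i.e. { nrg_gain = 2, cap_gain = 5 }, so capacity gains are weighted 2:5
 * against energy increases and a candidate needs more than 2.5 units of
 * capacity per unit of energy to produce a positive payoff.  Boosts close
 * to 100% select { 5, 0 } and accept any capacity gain regardless of the
 * energy cost.
 */
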
static int
__schedtune_accept_deltas(int nrg_delta, int cap_delta,
			  int perf_boost_idx, int perf_constrain_idx)
{
	int payoff = -INT_MAX;
	int gain_idx = -1;

	/* Performance Boost (B) region */
	if (nrg_delta >= 0 && cap_delta > 0)
		gain_idx = perf_boost_idx;
	/* Performance Constraint (C) region */
	else if (nrg_delta < 0 && cap_delta <= 0)
		gain_idx = perf_constrain_idx;

	/* Default: reject schedule candidate */
	if (gain_idx == -1)
		return payoff;

	/*
	 * Evaluate "Performance Boost" vs "Energy Increase"
	 *
	 * - Performance Boost (B) region
	 *
	 *   Condition: nrg_delta > 0 && cap_delta > 0
	 *   Payoff criteria:
	 *      cap_gain / nrg_gain  < cap_delta / nrg_delta =
	 *      cap_gain * nrg_delta < cap_delta * nrg_gain
	 *   Note that since both nrg_gain and nrg_delta are positive, the
	 *   inequality does not change. Thus:
	 *
	 *       payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
	 *
	 * - Performance Constraint (C) region
	 *
	 *   Condition: nrg_delta < 0 && cap_delta < 0
	 *   Payoff criteria:
	 *      cap_gain / nrg_gain  > cap_delta / nrg_delta =
	 *      cap_gain * nrg_delta < cap_delta * nrg_gain
	 *   Note that since nrg_gain > 0 while nrg_delta < 0, the
	 *   inequality is reversed. Thus:
	 *
	 *       payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
	 *
	 * This means that, when the same positive {cap,nrg}_gain values are
	 * used for both the B and C regions, we can use the same payoff
	 * formula, where a positive value represents the accept condition.
	 */
	payoff = cap_delta * threshold_gains[gain_idx].nrg_gain;
	payoff -= nrg_delta * threshold_gains[gain_idx].cap_gain;

	return payoff;
}

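/*
 * Worked example (added for illustration): with gain_idx 5, i.e.
 * { nrg_gain = 5, cap_gain = 4 }, a candidate with cap_delta = 20 and
 * nrg_delta = 10 yields payoff = 20 * 5 - 10 * 4 = 60, hence it is
 * accepted; with cap_delta = 5 the payoff is 5 * 5 - 10 * 4 = -15 and
 * the candidate is rejected.
 */
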
#ifdef CONFIG_CGROUP_SCHEDTUNE

/*
 * EAS scheduler tunables for task groups.
 */

/* SchedTune tunables for a group of tasks */
struct schedtune {
	/* SchedTune CGroup subsystem */
	struct cgroup_subsys_state css;

	/* Boost group allocated ID */
	int idx;

	/* Boost value for tasks on that SchedTune CGroup */
	int boost;

	/* Performance Boost (B) region threshold params */
	int perf_boost_idx;

	/* Performance Constraint (C) region threshold params */
	int perf_constrain_idx;

	/*
	 * Hint to bias scheduling of tasks on that SchedTune CGroup
	 * towards idle CPUs
	 */
	int prefer_idle;
};

static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct schedtune, css) : NULL;
}

static inline struct schedtune *task_schedtune(struct task_struct *tsk)
{
	return css_st(task_css(tsk, schedtune_cgrp_id));
}

static inline struct schedtune *parent_st(struct schedtune *st)
{
	return css_st(st->css.parent);
}

/*
 * SchedTune root control group
 * The root control group is used to define a system-wide boost tuning,
 * which is applied to all tasks in the system.
 * Task-specific boost tuning can be specified by creating and
 * configuring a child control group under the root one.
 * By default, system-wide boosting is disabled, i.e. no boosting is applied
 * to tasks which are not in a child control group.
 */
static struct schedtune
root_schedtune = {
	.boost	= 0,
	.perf_boost_idx = 0,
	.perf_constrain_idx = 0,
	.prefer_idle = 0,
};

int
schedtune_accept_deltas(int nrg_delta, int cap_delta,
			struct task_struct *task)
{
	struct schedtune *ct;
	int perf_boost_idx;
	int perf_constrain_idx;

	/* Optimal (O) region */
	if (nrg_delta < 0 && cap_delta > 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
		return INT_MAX;
	}

	/* Suboptimal (S) region */
	if (nrg_delta > 0 && cap_delta < 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
		return -INT_MAX;
	}

	/* Get task specific perf Boost/Constraints indexes */
	rcu_read_lock();
	ct = task_schedtune(task);
	perf_boost_idx = ct->perf_boost_idx;
	perf_constrain_idx = ct->perf_constrain_idx;
	rcu_read_unlock();

	return __schedtune_accept_deltas(nrg_delta, cap_delta,
			perf_boost_idx, perf_constrain_idx);
}

/*
 * Maximum number of boost groups to support
 * When per-task boosting is used we still allow only a limited number of
 * boost groups for two main reasons:
 * 1. on a real system we usually have only a few classes of workloads which
 *    make sense to boost with different values (e.g. background vs foreground
 *    tasks, interactive vs low-priority tasks)
 * 2. a limited number allows for a simpler and more memory/time efficient
 *    implementation, especially for the computation of the per-CPU boost
 *    value
 */
#define BOOSTGROUPS_COUNT 5

/* Array of configured boostgroups */
static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
	&root_schedtune,
	NULL,
};

/*
 * SchedTune boost groups
 * Keep track of all the boost groups which impact a CPU, for example when a
 * CPU has two RUNNABLE tasks belonging to two different boost groups and thus
 * likely with different boost values.
 * Since on each system we expect only a limited number of boost groups, here
 * we use a simple array to keep track of the metrics required to compute the
 * maximum per-CPU boosting value.
 */
struct boost_groups {
	bool idle;
	/* Maximum boost value for all RUNNABLE tasks on a CPU */
	int boost_max;
	struct {
		/* The boost for tasks on that boost group */
		int boost;
		/* Count of RUNNABLE tasks on that boost group */
		unsigned tasks;
	} group[BOOSTGROUPS_COUNT];
	/* CPU's boost group locking */
	raw_spinlock_t lock;
};

/* Boost groups affecting each CPU in the system */
DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);

static void
schedtune_cpu_update(int cpu)
{
	struct boost_groups *bg;
	int boost_max;
	int idx;

	bg = &per_cpu(cpu_boost_groups, cpu);

	/* The root boost group is always active */
	boost_max = bg->group[0].boost;
	for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) {
		/*
		 * A boost group affects a CPU only if it has
		 * RUNNABLE tasks on that CPU
		 */
		if (bg->group[idx].tasks == 0)
			continue;

		boost_max = max(boost_max, bg->group[idx].boost);
	}
	/*
	 * Ensure boost_max is non-negative when all cgroup boost values
	 * are negative. This avoids under-accounting of CPU capacity, which
	 * may cause task stacking and frequency spikes.
	 */
	boost_max = max(boost_max, 0);
	bg->boost_max = boost_max;
}

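/*
 * Example (added for illustration): if on a CPU the root group has boost
 * -10, a second group with boost 50 has one RUNNABLE task and a third
 * group with boost 80 has no RUNNABLE tasks, boost_max ends up as
 * max(-10, 50) = 50.  If only negatively boosted groups are active, the
 * final max(boost_max, 0) clamps boost_max to 0.
 */
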
static int
schedtune_boostgroup_update(int idx, int boost)
{
	struct boost_groups *bg;
	int cur_boost_max;
	int old_boost;
	int cpu;

	/* Update per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);

		/*
		 * Keep track of current boost values to compute the per CPU
		 * maximum only when it has been affected by the new value of
		 * the updated boost group
		 */
		cur_boost_max = bg->boost_max;
		old_boost = bg->group[idx].boost;

		/* Update the boost value of this boost group */
		bg->group[idx].boost = boost;

		/* Check if this update increases the current max */
		if (boost > cur_boost_max && bg->group[idx].tasks) {
			bg->boost_max = boost;
			trace_sched_tune_boostgroup_update(cpu, 1, bg->boost_max);
			continue;
		}

		/* Check if this update has decreased the current max */
		if (cur_boost_max == old_boost && old_boost > boost) {
			schedtune_cpu_update(cpu);
			trace_sched_tune_boostgroup_update(cpu, -1, bg->boost_max);
			continue;
		}

		trace_sched_tune_boostgroup_update(cpu, 0, bg->boost_max);
	}

	return 0;
}

#define ENQUEUE_TASK  1
#define DEQUEUE_TASK -1

static inline void
schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	int tasks = bg->group[idx].tasks + task_count;

	/* Update boosted tasks count without letting it go negative */
	bg->group[idx].tasks = max(0, tasks);

	trace_sched_tune_tasks_update(p, cpu, tasks, idx,
			bg->group[idx].boost, bg->boost_max);

	/* Boost group activation or deactivation on that RQ */
	if (tasks == 1 || tasks == 0)
		schedtune_cpu_update(cpu);
}

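/*
 * Example (added for illustration): when the first task of a boost group
 * becomes RUNNABLE on a CPU, tasks goes from 0 to 1 and
 * schedtune_cpu_update() refreshes boost_max; the same happens when the
 * last task of a group is dequeued and tasks drops back to 0.
 */
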
/*
 * NOTE: This function must be called while holding the lock on the CPU RQ
 */
void schedtune_enqueue_task(struct task_struct *p, int cpu)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	unsigned long irq_flags;
	struct schedtune *st;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	/*
	 * When a task is marked PF_EXITING by do_exit() it's going to be
	 * dequeued and enqueued multiple times in the exit path.
	 * Thus we avoid any further update, since we do not want to change
	 * CPU boosting while the task is exiting.
	 */
	if (p->flags & PF_EXITING)
		return;

	/*
	 * Boost group accounting is protected by a per-cpu lock and requires
	 * interrupts to be disabled to avoid race conditions, for example on
	 * do_exit()::cgroup_exit() and task migration.
	 */
	raw_spin_lock_irqsave(&bg->lock, irq_flags);
	rcu_read_lock();

	st = task_schedtune(p);
	idx = st->idx;

	schedtune_tasks_update(p, cpu, idx, ENQUEUE_TASK);

	rcu_read_unlock();
	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
}

int schedtune_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;
	struct boost_groups *bg;
	struct rq_flags irq_flags;
	unsigned int cpu;
	struct rq *rq;
	int src_bg; /* Source boost group index */
	int dst_bg; /* Destination boost group index */
	int tasks;

	if (unlikely(!schedtune_initialized))
		return 0;

	cgroup_taskset_for_each(task, css, tset) {

		/*
		 * Lock the CPU's RQ the task is enqueued on to avoid race
		 * conditions with migration code while the task is being
		 * accounted
		 */
		rq = lock_rq_of(task, &irq_flags);

		if (!task->on_rq) {
			unlock_rq_of(rq, task, &irq_flags);
			continue;
		}

		/*
		 * Boost group accounting is protected by a per-cpu lock and
		 * requires interrupts to be disabled to avoid race
		 * conditions on...
		 */
		cpu = cpu_of(rq);
		bg = &per_cpu(cpu_boost_groups, cpu);
		raw_spin_lock(&bg->lock);

		dst_bg = css_st(css)->idx;
		src_bg = task_schedtune(task)->idx;

		/*
		 * Current task is not changing boostgroup, which can
		 * happen when the new hierarchy is in use.
		 */
		if (unlikely(dst_bg == src_bg)) {
			raw_spin_unlock(&bg->lock);
			unlock_rq_of(rq, task, &irq_flags);
			continue;
		}

		/*
		 * This is the case of a RUNNABLE task which is switching its
		 * current boost group.
		 */

		/* Move task from src to dst boost group */
		tasks = bg->group[src_bg].tasks - 1;
		bg->group[src_bg].tasks = max(0, tasks);
		bg->group[dst_bg].tasks += 1;

		raw_spin_unlock(&bg->lock);
		unlock_rq_of(rq, task, &irq_flags);

		/* Update CPU boost group */
		if (bg->group[src_bg].tasks == 0 || bg->group[dst_bg].tasks == 1)
			schedtune_cpu_update(task_cpu(task));

	}

	return 0;
}

void schedtune_cancel_attach(struct cgroup_taskset *tset)
{
	/*
	 * This can happen only if the SchedTune controller is mounted with
	 * other hierarchies and one of them fails. Since usually SchedTune is
	 * mounted on its own hierarchy, for the time being we do not implement
	 * a proper rollback mechanism.
	 */
	WARN(1, "SchedTune cancel attach not implemented");
}

/*
 * NOTE: This function must be called while holding the lock on the CPU RQ
 */
void schedtune_dequeue_task(struct task_struct *p, int cpu)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	unsigned long irq_flags;
	struct schedtune *st;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	/*
	 * When a task is marked PF_EXITING by do_exit() it's going to be
	 * dequeued and enqueued multiple times in the exit path.
	 * Thus we avoid any further update, since we do not want to change
	 * CPU boosting while the task is exiting.
	 * The last dequeue is already enforced by the do_exit() code path
	 * via schedtune_exit_task().
	 */
	if (p->flags & PF_EXITING)
		return;

	/*
	 * Boost group accounting is protected by a per-cpu lock and requires
	 * interrupts to be disabled to avoid race conditions on...
	 */
	raw_spin_lock_irqsave(&bg->lock, irq_flags);
	rcu_read_lock();

	st = task_schedtune(p);
	idx = st->idx;

	schedtune_tasks_update(p, cpu, idx, DEQUEUE_TASK);

	rcu_read_unlock();
	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
}

void schedtune_exit_task(struct task_struct *tsk)
{
	struct schedtune *st;
	struct rq_flags irq_flags;
	unsigned int cpu;
	struct rq *rq;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	rq = lock_rq_of(tsk, &irq_flags);
	rcu_read_lock();

	cpu = cpu_of(rq);
	st = task_schedtune(tsk);
	idx = st->idx;
	schedtune_tasks_update(tsk, cpu, idx, DEQUEUE_TASK);

	rcu_read_unlock();
	unlock_rq_of(rq, tsk, &irq_flags);
}

int schedtune_cpu_boost(int cpu)
{
	struct boost_groups *bg;

	bg = &per_cpu(cpu_boost_groups, cpu);
	return bg->boost_max;
}

int schedtune_task_boost(struct task_struct *p)
{
	struct schedtune *st;
	int task_boost;

	if (unlikely(!schedtune_initialized))
		return 0;

	/* Get task boost value */
	rcu_read_lock();
	st = task_schedtune(p);
	task_boost = st->boost;
	rcu_read_unlock();

	return task_boost;
}

int schedtune_prefer_idle(struct task_struct *p)
{
	struct schedtune *st;
	int prefer_idle;

	if (unlikely(!schedtune_initialized))
		return 0;

	/* Get prefer_idle value */
	rcu_read_lock();
	st = task_schedtune(p);
	prefer_idle = st->prefer_idle;
	rcu_read_unlock();

	return prefer_idle;
}

static u64
prefer_idle_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->prefer_idle;
}

static int
prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
		  u64 prefer_idle)
{
	struct schedtune *st = css_st(css);

	st->prefer_idle = prefer_idle;

	return 0;
}

static s64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->boost;
}

static int
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
	    s64 boost)
{
	struct schedtune *st = css_st(css);
	unsigned threshold_idx;
	int boost_pct;

	if (boost < -100 || boost > 100)
		return -EINVAL;
	boost_pct = boost;

	/*
	 * Update threshold params for Performance Boost (B)
	 * and Performance Constraint (C) regions.
	 * The current implementation uses the same cuts for both
	 * B and C regions.
	 */
	threshold_idx = clamp(boost_pct, 0, 99) / 10;
	st->perf_boost_idx = threshold_idx;
	st->perf_constrain_idx = threshold_idx;

	st->boost = boost;
	if (css == &root_schedtune.css) {
		sysctl_sched_cfs_boost = boost;
		perf_boost_idx = threshold_idx;
		perf_constrain_idx = threshold_idx;
	}

	/* Update CPU boost */
	schedtune_boostgroup_update(st->idx, st->boost);

	trace_sched_tune_config(st->boost);

	return 0;
}

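/*
 * Example (added for illustration): writing 60 to the boost attribute
 * stores st->boost = 60 and selects threshold index
 * clamp(60, 0, 99) / 10 = 6, i.e. { nrg_gain = 5, cap_gain = 3 }, for
 * both the B and C regions of that group; values outside [-100, 100]
 * are rejected with -EINVAL.
 */
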
static struct cftype files[] = {
	{
		.name = "boost",
		.read_s64 = boost_read,
		.write_s64 = boost_write,
	},
	{
		.name = "prefer_idle",
		.read_u64 = prefer_idle_read,
		.write_u64 = prefer_idle_write,
	},
	{ }	/* terminate */
};

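/*
 * Typical usage from userspace (added example; assumes the controller is
 * mounted on its own legacy hierarchy, where the attributes are exposed
 * with the "schedtune." prefix):
 *
 *   mount -t cgroup -o schedtune none /sys/fs/cgroup/stune
 *   mkdir /sys/fs/cgroup/stune/foreground
 *   echo 10 > /sys/fs/cgroup/stune/foreground/schedtune.boost
 *   echo 1  > /sys/fs/cgroup/stune/foreground/schedtune.prefer_idle
 *   echo <pid> > /sys/fs/cgroup/stune/foreground/tasks
 */
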
static int
schedtune_boostgroup_init(struct schedtune *st)
{
	struct boost_groups *bg;
	int cpu;

	/* Keep track of allocated boost groups */
	allocated_group[st->idx] = st;

	/* Initialize the per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);
		bg->group[st->idx].boost = 0;
		bg->group[st->idx].tasks = 0;
	}

	return 0;
}

static struct cgroup_subsys_state *
schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct schedtune *st;
	int idx;

	if (!parent_css)
		return &root_schedtune.css;

	/* Allow only single-level hierarchies */
	if (parent_css != &root_schedtune.css) {
		pr_err("Nested SchedTune boosting groups not allowed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Allow only a limited number of boosting groups */
	for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
		if (!allocated_group[idx])
			break;
	if (idx == BOOSTGROUPS_COUNT) {
		pr_err("Trying to create more than %d SchedTune boosting groups\n",
		       BOOSTGROUPS_COUNT);
		return ERR_PTR(-ENOSPC);
	}

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto out;

	/* Initialize per CPUs boost group support */
	st->idx = idx;
	if (schedtune_boostgroup_init(st))
		goto release;

	return &st->css;

release:
	kfree(st);
out:
	return ERR_PTR(-ENOMEM);
}

static void
schedtune_boostgroup_release(struct schedtune *st)
{
	/* Reset this boost group */
	schedtune_boostgroup_update(st->idx, 0);

	/* Keep track of allocated boost groups */
	allocated_group[st->idx] = NULL;
}

static void
schedtune_css_free(struct cgroup_subsys_state *css)
{
	struct schedtune *st = css_st(css);

	schedtune_boostgroup_release(st);
	kfree(st);
}

struct cgroup_subsys schedtune_cgrp_subsys = {
	.css_alloc	= schedtune_css_alloc,
	.css_free	= schedtune_css_free,
	.can_attach	= schedtune_can_attach,
	.cancel_attach	= schedtune_cancel_attach,
	.legacy_cftypes	= files,
	.early_init	= 1,
};

static inline void
schedtune_init_cgroups(void)
{
	struct boost_groups *bg;
	int cpu;

	/* Initialize the per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);
		memset(bg, 0, sizeof(struct boost_groups));
		raw_spin_lock_init(&bg->lock);
	}

	pr_info("schedtune: configured to support %d boost groups\n",
		BOOSTGROUPS_COUNT);

	schedtune_initialized = true;
}

#else /* CONFIG_CGROUP_SCHEDTUNE */

int
schedtune_accept_deltas(int nrg_delta, int cap_delta,
			struct task_struct *task)
{
	/* Optimal (O) region */
	if (nrg_delta < 0 && cap_delta > 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
		return INT_MAX;
	}

	/* Suboptimal (S) region */
	if (nrg_delta > 0 && cap_delta < 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
		return -INT_MAX;
	}

	return __schedtune_accept_deltas(nrg_delta, cap_delta,
			perf_boost_idx, perf_constrain_idx);
}

#endif /* CONFIG_CGROUP_SCHEDTUNE */

int
sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	unsigned threshold_idx;
	int boost_pct;

	if (ret || !write)
		return ret;

	if (sysctl_sched_cfs_boost < -100 || sysctl_sched_cfs_boost > 100)
		return -EINVAL;
	boost_pct = sysctl_sched_cfs_boost;

	/*
	 * Update threshold params for Performance Boost (B)
	 * and Performance Constraint (C) regions.
	 * The current implementation uses the same cuts for both
	 * B and C regions.
	 */
	threshold_idx = clamp(boost_pct, 0, 99) / 10;
	perf_boost_idx = threshold_idx;
	perf_constrain_idx = threshold_idx;

	return 0;
}

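/*
 * The global boost can also be set via the sysctl interface (added note;
 * assumes the ctl_table entry is registered as "sched_cfs_boost" under
 * /proc/sys/kernel, as in AOSP kernel trees):
 *
 *   echo 20 > /proc/sys/kernel/sched_cfs_boost
 *
 * which updates sysctl_sched_cfs_boost and, through the handler above,
 * the global perf_boost_idx/perf_constrain_idx thresholds.
 */
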
#ifdef CONFIG_SCHED_DEBUG
static void
schedtune_test_nrg(unsigned long delta_pwr)
{
	unsigned long test_delta_pwr;
	unsigned long test_norm_pwr;
	int idx;

	/*
	 * Check normalization constants using some constant system
	 * energy values
	 */
	pr_info("schedtune: verify normalization constants...\n");
	for (idx = 0; idx < 6; ++idx) {
		test_delta_pwr = delta_pwr >> idx;

		/* Normalize on max energy for target platform */
		test_norm_pwr = reciprocal_divide(
					test_delta_pwr << SCHED_CAPACITY_SHIFT,
					schedtune_target_nrg.rdiv);

		pr_info("schedtune: max_pwr/2^%d: %4lu => norm_pwr: %5lu\n",
			idx, test_delta_pwr, test_norm_pwr);
	}
}
#else
#define schedtune_test_nrg(delta_pwr)
#endif

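/*
 * Worked example of the normalization (added for illustration): with
 * min_power = 200 and max_power = 1224, delta_pwr = 1024 and rdiv is
 * reciprocal_value(1024).  An energy delta of 256 is then normalized as
 * reciprocal_divide(256 << SCHED_CAPACITY_SHIFT, rdiv)
 * = (256 * 1024) / 1024 = 256, i.e. energy deltas are scaled into the
 * [0..SCHED_CAPACITY_SCALE] range spanned by the platform's min/max power.
 */
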
/*
 * Compute the min/max power consumption of a cluster and all its CPUs
 */
static void
schedtune_add_cluster_nrg(
		struct sched_domain *sd,
		struct sched_group *sg,
		struct target_nrg *ste)
{
	struct sched_domain *sd2;
	struct sched_group *sg2;

	struct cpumask *cluster_cpus;
	char str[32];

	unsigned long min_pwr;
	unsigned long max_pwr;
	int cpu;

	/* Get Cluster energy using EM data for the first CPU */
	cluster_cpus = sched_group_cpus(sg);
	snprintf(str, 32, "CLUSTER[%*pbl]",
		 cpumask_pr_args(cluster_cpus));

	min_pwr = sg->sge->idle_states[sg->sge->nr_idle_states - 1].power;
	max_pwr = sg->sge->cap_states[sg->sge->nr_cap_states - 1].power;
	pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
		str, min_pwr, max_pwr);

	/*
	 * Keep track of this cluster's energy in the computation of the
	 * overall system energy
	 */
	ste->min_power += min_pwr;
	ste->max_power += max_pwr;

	/* Get CPU energy using EM data for each CPU in the group */
	for_each_cpu(cpu, cluster_cpus) {
		/* Get a SD view for the specific CPU */
		for_each_domain(cpu, sd2) {
			/* Get the CPU group */
			sg2 = sd2->groups;
			min_pwr = sg2->sge->idle_states[sg2->sge->nr_idle_states - 1].power;
			max_pwr = sg2->sge->cap_states[sg2->sge->nr_cap_states - 1].power;

			ste->min_power += min_pwr;
			ste->max_power += max_pwr;

			snprintf(str, 32, "CPU[%d]", cpu);
			pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
				str, min_pwr, max_pwr);

			/*
			 * Assume we have EM data only at the CPU and
			 * the upper CLUSTER level
			 */
			BUG_ON(!cpumask_equal(
				sched_group_cpus(sg),
				sched_group_cpus(sd2->parent->groups)
				));
			break;
		}
	}
}

/*
 * Initialize the constants required to compute normalized energy.
 * The values of these constants depend on the EM data for the specific
 * target system and topology.
 * Thus, this function is expected to be called by the code
 * that binds the EM to the topology information.
 */
static int
schedtune_init(void)
{
	struct target_nrg *ste = &schedtune_target_nrg;
	unsigned long delta_pwr = 0;
	struct sched_domain *sd;
	struct sched_group *sg;

	pr_info("schedtune: init normalization constants...\n");
	ste->max_power = 0;
	ste->min_power = 0;

	rcu_read_lock();

	/*
	 * When EAS is in use, we always have a pointer to the highest SD
	 * which provides EM data.
	 */
	sd = rcu_dereference(per_cpu(sd_ea, cpumask_first(cpu_online_mask)));
	if (!sd) {
		pr_info("schedtune: no energy model data\n");
		goto nodata;
	}

	sg = sd->groups;
	do {
		schedtune_add_cluster_nrg(sd, sg, ste);
	} while (sg = sg->next, sg != sd->groups);

	rcu_read_unlock();

	pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
		"SYSTEM", ste->min_power, ste->max_power);

	/* Compute normalization constants */
	delta_pwr = ste->max_power - ste->min_power;
	ste->rdiv = reciprocal_value(delta_pwr);
	pr_info("schedtune: using normalization constants mul: %u sh1: %u sh2: %u\n",
		ste->rdiv.m, ste->rdiv.sh1, ste->rdiv.sh2);

	schedtune_test_nrg(delta_pwr);

#ifdef CONFIG_CGROUP_SCHEDTUNE
	schedtune_init_cgroups();
#else
	pr_info("schedtune: configured to support global boosting only\n");
#endif

	schedtune_spc_rdiv = reciprocal_value(100);

	return 0;

nodata:
	pr_warning("schedtune: disabled!\n");
	rcu_read_unlock();
	return -EINVAL;
}
postcore_initcall(schedtune_init);