#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "sched.h"
#include "tune.h"

#ifdef CONFIG_CGROUP_SCHEDTUNE
bool schedtune_initialized = false;
#endif

unsigned int sysctl_sched_cfs_boost __read_mostly;

extern struct reciprocal_value schedtune_spc_rdiv;
struct target_nrg schedtune_target_nrg;

/* Performance Boost region (B) threshold params */
static int perf_boost_idx;

/* Performance Constraint region (C) threshold params */
static int perf_constrain_idx;

/*
 * Performance-Energy (P-E) space threshold constants
 */
struct threshold_params {
	int nrg_gain;
	int cap_gain;
};

/*
 * System-specific P-E space threshold constants
 */
static struct threshold_params
threshold_gains[] = {
	{ 0, 5 }, /*   < 10% */
	{ 1, 5 }, /*   < 20% */
	{ 2, 5 }, /*   < 30% */
	{ 3, 5 }, /*   < 40% */
	{ 4, 5 }, /*   < 50% */
	{ 5, 4 }, /*   < 60% */
	{ 5, 3 }, /*   < 70% */
	{ 5, 2 }, /*   < 80% */
	{ 5, 1 }, /*   < 90% */
	{ 5, 0 }  /* <= 100% */
};
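
/*
 * Illustrative reading of the table above (annotation, not from the original
 * sources): entry i is selected for boost values in the [i*10, (i+1)*10)%
 * range via the clamp()/10 mapping in boost_write() and
 * sysctl_sched_cfs_boost_handler(). Roughly, low entries (e.g. { 0, 5 })
 * accept a candidate only when it saves energy, while high entries
 * (e.g. { 5, 0 }) accept any capacity increase regardless of its energy cost.
 */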

static int
__schedtune_accept_deltas(int nrg_delta, int cap_delta,
			  int perf_boost_idx, int perf_constrain_idx)
{
	int payoff = -INT_MAX;
	int gain_idx = -1;

	/* Performance Boost (B) region */
	if (nrg_delta >= 0 && cap_delta > 0)
		gain_idx = perf_boost_idx;
	/* Performance Constraint (C) region */
	else if (nrg_delta < 0 && cap_delta <= 0)
		gain_idx = perf_constrain_idx;

	/* Default: reject schedule candidate */
	if (gain_idx == -1)
		return payoff;

	/*
	 * Evaluate "Performance Boost" vs "Energy Increase"
	 *
	 * - Performance Boost (B) region
	 *
	 *   Condition: nrg_delta >= 0 && cap_delta > 0
	 *   Payoff criteria:
	 *      cap_gain / nrg_gain  < cap_delta / nrg_delta =
	 *      cap_gain * nrg_delta < cap_delta * nrg_gain
	 *   Note that since both nrg_gain and nrg_delta are positive, the
	 *   inequality does not change. Thus:
	 *
	 *      payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
	 *
	 * - Performance Constraint (C) region
	 *
	 *   Condition: nrg_delta < 0 && cap_delta <= 0
	 *   Payoff criteria:
	 *      cap_gain / nrg_gain  > cap_delta / nrg_delta =
	 *      cap_gain * nrg_delta < cap_delta * nrg_gain
	 *   Note that since nrg_gain > 0 while nrg_delta < 0, the
	 *   inequality is reversed. Thus:
	 *
	 *      payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
	 *
	 * This means that, given the same positive {cap,nrg}_gain values for
	 * both the B and C regions, we can use the same payoff formula, where
	 * a positive value represents the accept condition.
	 */
	payoff  = cap_delta * threshold_gains[gain_idx].nrg_gain;
	payoff -= nrg_delta * threshold_gains[gain_idx].cap_gain;

	return payoff;
}
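
/*
 * Worked example (illustrative numbers, not taken from a real platform):
 * with a 30% boost, gain_idx = 3 and threshold_gains[3] = { 3, 5 }.
 * A candidate in the Boost (B) region with cap_delta = 80 and nrg_delta = 20
 * gives:
 *
 *   payoff = 80 * 3 - 20 * 5 =  140   (> 0 => accept)
 *
 * while cap_delta = 20 and nrg_delta = 80 gives:
 *
 *   payoff = 20 * 3 - 80 * 5 = -340   (< 0 => reject)
 */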

#ifdef CONFIG_CGROUP_SCHEDTUNE

/*
 * EAS scheduler tunables for task groups.
 */

/* SchedTune tunables for a group of tasks */
struct schedtune {
	/* SchedTune CGroup subsystem */
	struct cgroup_subsys_state css;

	/* Boost group allocated ID */
	int idx;

	/* Boost value for tasks on that SchedTune CGroup */
	int boost;

	/* Performance Boost (B) region threshold params */
	int perf_boost_idx;

	/* Performance Constraint (C) region threshold params */
	int perf_constrain_idx;

	/*
	 * Hint to bias scheduling of tasks on that SchedTune CGroup
	 * towards idle CPUs
	 */
	int prefer_idle;
};

static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct schedtune, css) : NULL;
}

static inline struct schedtune *task_schedtune(struct task_struct *tsk)
{
	return css_st(task_css(tsk, schedtune_cgrp_id));
}

static inline struct schedtune *parent_st(struct schedtune *st)
{
	return css_st(st->css.parent);
}

/*
 * SchedTune root control group
 * The root control group is used to define system-wide boosting tuning,
 * which is applied to all tasks in the system.
 * Task-specific boost tuning can be specified by creating and
 * configuring a child control group under the root one.
 * By default, system-wide boosting is disabled, i.e. no boosting is applied
 * to tasks which are not in a child control group.
 */
static struct schedtune
root_schedtune = {
	.boost	= 0,
	.perf_boost_idx = 0,
	.perf_constrain_idx = 0,
	.prefer_idle = 0,
};

int
schedtune_accept_deltas(int nrg_delta, int cap_delta,
			struct task_struct *task)
{
	struct schedtune *ct;
	int perf_boost_idx;
	int perf_constrain_idx;

	/* Optimal (O) region */
	if (nrg_delta < 0 && cap_delta > 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
		return INT_MAX;
	}

	/* Suboptimal (S) region */
	if (nrg_delta > 0 && cap_delta < 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
		return -INT_MAX;
	}

	/* Get the task-specific perf Boost/Constraint indexes */
	rcu_read_lock();
	ct = task_schedtune(task);
	perf_boost_idx = ct->perf_boost_idx;
	perf_constrain_idx = ct->perf_constrain_idx;
	rcu_read_unlock();

	return __schedtune_accept_deltas(nrg_delta, cap_delta,
			perf_boost_idx, perf_constrain_idx);
}

/*
 * Maximum number of boost groups to support
 * When per-task boosting is used we still allow only a limited number of
 * boost groups, for two main reasons:
 * 1. on a real system we usually have only a few classes of workloads which
 *    make sense to boost with different values (e.g. background vs foreground
 *    tasks, interactive vs low-priority tasks)
 * 2. a limited number allows for a simpler and more memory/time efficient
 *    implementation, especially for the computation of the per-CPU boost
 *    value
 */
#define BOOSTGROUPS_COUNT 5

/* Array of configured boostgroups */
static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
	&root_schedtune,
	NULL,
};

/*
 * SchedTune boost groups
 * Keep track of all the boost groups which impact on CPUs, for example when
 * a CPU has two RUNNABLE tasks belonging to two different boost groups and
 * thus likely with different boost values.
 * Since on each system we expect only a limited number of boost groups, here
 * we use a simple array to keep track of the metrics required to compute the
 * maximum per-CPU boosting value.
 */
struct boost_groups {
	bool idle;
	/* Maximum boost value for all RUNNABLE tasks on a CPU */
	int boost_max;
	struct {
		/* The boost for tasks on that boost group */
		int boost;
		/* Count of RUNNABLE tasks on that boost group */
		unsigned tasks;
	} group[BOOSTGROUPS_COUNT];
	/* CPU's boost group locking */
	raw_spinlock_t lock;
};

/* Boost groups affecting each CPU in the system */
DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);

static void
schedtune_cpu_update(int cpu)
{
	struct boost_groups *bg;
	int boost_max;
	int idx;

	bg = &per_cpu(cpu_boost_groups, cpu);

	/* The root boost group is always active */
	boost_max = bg->group[0].boost;
	for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) {
		/*
		 * A boost group affects a CPU only if it has
		 * RUNNABLE tasks on that CPU
		 */
		if (bg->group[idx].tasks == 0)
			continue;

		boost_max = max(boost_max, bg->group[idx].boost);
	}
	/*
	 * Ensure boost_max is non-negative when all cgroup boost values
	 * are negative. This avoids under-accounting of CPU capacity, which
	 * may cause task stacking and frequency spikes.
	 */
	boost_max = max(boost_max, 0);
	bg->boost_max = boost_max;
}
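
/*
 * Example (illustrative): if the root group has boost 0 and two other groups,
 * boosted to -20 and 15, both have RUNNABLE tasks on this CPU, boost_max
 * becomes max(0, -20, 15) = 15. If every active group were negatively
 * boosted, the final clamp above would still report 0.
 */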

static int
schedtune_boostgroup_update(int idx, int boost)
{
	struct boost_groups *bg;
	int cur_boost_max;
	int old_boost;
	int cpu;

	/* Update per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);

		/*
		 * Keep track of current boost values to compute the per CPU
		 * maximum only when it has been affected by the new value of
		 * the updated boost group
		 */
		cur_boost_max = bg->boost_max;
		old_boost = bg->group[idx].boost;

		/* Update the boost value of this boost group */
		bg->group[idx].boost = boost;

		/* Check if this update increases the current max */
		if (boost > cur_boost_max && bg->group[idx].tasks) {
			bg->boost_max = boost;
			trace_sched_tune_boostgroup_update(cpu, 1, bg->boost_max);
			continue;
		}

		/* Check if this update has decreased the current max */
		if (cur_boost_max == old_boost && old_boost > boost) {
			schedtune_cpu_update(cpu);
			trace_sched_tune_boostgroup_update(cpu, -1, bg->boost_max);
			continue;
		}

		trace_sched_tune_boostgroup_update(cpu, 0, bg->boost_max);
	}

	return 0;
}
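
/*
 * Example (illustrative): raising a group's boost from 5 to 25 on a CPU whose
 * boost_max is currently 10, while that group has RUNNABLE tasks there,
 * updates boost_max to 25 directly; lowering the boost of the group which
 * currently defines boost_max instead triggers a full schedtune_cpu_update()
 * rescan.
 */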

#define ENQUEUE_TASK  1
#define DEQUEUE_TASK -1

static inline void
schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	int tasks = bg->group[idx].tasks + task_count;

	/* Update the boosted tasks count while avoiding making it negative */
	bg->group[idx].tasks = max(0, tasks);

	trace_sched_tune_tasks_update(p, cpu, tasks, idx,
			bg->group[idx].boost, bg->boost_max);

	/* Boost group activation or deactivation on that RQ */
	if (tasks == 1 || tasks == 0)
		schedtune_cpu_update(cpu);
}
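
/*
 * Example (illustrative): the first enqueue of a task from a given boost
 * group on this CPU moves its tasks count from 0 to 1 and activates the
 * group, while the dequeue of its last task moves the count back to 0 and
 * deactivates it; both transitions recompute the CPU's boost_max above.
 */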

/*
 * NOTE: This function must be called while holding the lock on the CPU RQ
 */
void schedtune_enqueue_task(struct task_struct *p, int cpu)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	unsigned long irq_flags;
	struct schedtune *st;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	/*
	 * When a task is marked PF_EXITING by do_exit() it's going to be
	 * dequeued and enqueued multiple times in the exit path.
	 * Thus we avoid any further update, since we do not want to change
	 * CPU boosting while the task is exiting.
	 */
	if (p->flags & PF_EXITING)
		return;

	/*
	 * Boost group accounting is protected by a per-CPU lock and requires
	 * interrupts to be disabled to avoid race conditions, for example
	 * with do_exit()::cgroup_exit() and task migration.
	 */
	raw_spin_lock_irqsave(&bg->lock, irq_flags);
	rcu_read_lock();

	st = task_schedtune(p);
	idx = st->idx;

	schedtune_tasks_update(p, cpu, idx, ENQUEUE_TASK);

	rcu_read_unlock();
	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
}

int schedtune_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;
	struct boost_groups *bg;
	struct rq_flags irq_flags;
	unsigned int cpu;
	struct rq *rq;
	int src_bg; /* Source boost group index */
	int dst_bg; /* Destination boost group index */
	int tasks;

	if (unlikely(!schedtune_initialized))
		return 0;

	cgroup_taskset_for_each(task, css, tset) {

		/*
		 * Lock the CPU's RQ the task is enqueued on to avoid race
		 * conditions with the migration code while the task is being
		 * accounted
		 */
		rq = lock_rq_of(task, &irq_flags);

		if (!task->on_rq) {
			unlock_rq_of(rq, task, &irq_flags);
			continue;
		}

		/*
		 * Boost group accounting is protected by a per-CPU lock and
		 * requires interrupts to be disabled to avoid race conditions,
		 * for example with task migration.
		 */
		cpu = cpu_of(rq);
		bg = &per_cpu(cpu_boost_groups, cpu);
		raw_spin_lock(&bg->lock);

		dst_bg = css_st(css)->idx;
		src_bg = task_schedtune(task)->idx;

		/*
		 * The current task is not changing its boost group, which can
		 * happen when the new cgroup hierarchy is in use.
		 */
		if (unlikely(dst_bg == src_bg)) {
			raw_spin_unlock(&bg->lock);
			unlock_rq_of(rq, task, &irq_flags);
			continue;
		}

		/*
		 * This is the case of a RUNNABLE task which is switching its
		 * current boost group.
		 */

		/* Move the task from the src to the dst boost group */
		tasks = bg->group[src_bg].tasks - 1;
		bg->group[src_bg].tasks = max(0, tasks);
		bg->group[dst_bg].tasks += 1;

		raw_spin_unlock(&bg->lock);
		unlock_rq_of(rq, task, &irq_flags);

		/* Update the CPU boost groups */
		if (bg->group[src_bg].tasks == 0 || bg->group[dst_bg].tasks == 1)
			schedtune_cpu_update(task_cpu(task));

	}

	return 0;
}

void schedtune_cancel_attach(struct cgroup_taskset *tset)
{
	/*
	 * This can happen only if the SchedTune controller is mounted along
	 * with other hierarchies and one of them fails. Since SchedTune is
	 * usually mounted on its own hierarchy, for the time being we do not
	 * implement a proper rollback mechanism.
	 */
	WARN(1, "SchedTune cancel attach not implemented");
}

/*
 * NOTE: This function must be called while holding the lock on the CPU RQ
 */
void schedtune_dequeue_task(struct task_struct *p, int cpu)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	unsigned long irq_flags;
	struct schedtune *st;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	/*
	 * When a task is marked PF_EXITING by do_exit() it's going to be
	 * dequeued and enqueued multiple times in the exit path.
	 * Thus we avoid any further update, since we do not want to change
	 * CPU boosting while the task is exiting.
	 * The last dequeue is already enforced by the do_exit() code path
	 * via schedtune_exit_task().
	 */
	if (p->flags & PF_EXITING)
		return;

	/*
	 * Boost group accounting is protected by a per-CPU lock and requires
	 * interrupts to be disabled to avoid race conditions, for example
	 * with task migration.
	 */
	raw_spin_lock_irqsave(&bg->lock, irq_flags);
	rcu_read_lock();

	st = task_schedtune(p);
	idx = st->idx;

	schedtune_tasks_update(p, cpu, idx, DEQUEUE_TASK);

	rcu_read_unlock();
	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
}

void schedtune_exit_task(struct task_struct *tsk)
{
	struct schedtune *st;
	struct rq_flags irq_flags;
	unsigned int cpu;
	struct rq *rq;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	rq = lock_rq_of(tsk, &irq_flags);
	rcu_read_lock();

	cpu = cpu_of(rq);
	st = task_schedtune(tsk);
	idx = st->idx;
	schedtune_tasks_update(tsk, cpu, idx, DEQUEUE_TASK);

	rcu_read_unlock();
	unlock_rq_of(rq, tsk, &irq_flags);
}

int schedtune_cpu_boost(int cpu)
{
	struct boost_groups *bg;

	bg = &per_cpu(cpu_boost_groups, cpu);
	return bg->boost_max;
}

int schedtune_task_boost(struct task_struct *p)
{
	struct schedtune *st;
	int task_boost;

	if (unlikely(!schedtune_initialized))
		return 0;

	/* Get the task's boost value */
	rcu_read_lock();
	st = task_schedtune(p);
	task_boost = st->boost;
	rcu_read_unlock();

	return task_boost;
}

int schedtune_prefer_idle(struct task_struct *p)
{
	struct schedtune *st;
	int prefer_idle;

	if (unlikely(!schedtune_initialized))
		return 0;

	/* Get the task's prefer_idle value */
	rcu_read_lock();
	st = task_schedtune(p);
	prefer_idle = st->prefer_idle;
	rcu_read_unlock();

	return prefer_idle;
}

static u64
prefer_idle_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->prefer_idle;
}

static int
prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
		  u64 prefer_idle)
{
	struct schedtune *st = css_st(css);

	st->prefer_idle = prefer_idle;

	return 0;
}

static s64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->boost;
}

static int
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
	    s64 boost)
{
	struct schedtune *st = css_st(css);
	unsigned threshold_idx;
	int boost_pct;

	if (boost < -100 || boost > 100)
		return -EINVAL;
	boost_pct = boost;

	/*
	 * Update threshold params for Performance Boost (B)
	 * and Performance Constraint (C) regions.
	 * The current implementation uses the same cuts for both
	 * B and C regions.
	 */
	threshold_idx = clamp(boost_pct, 0, 99) / 10;
	st->perf_boost_idx = threshold_idx;
	st->perf_constrain_idx = threshold_idx;

	st->boost = boost;
	if (css == &root_schedtune.css) {
		sysctl_sched_cfs_boost = boost;
		perf_boost_idx = threshold_idx;
		perf_constrain_idx = threshold_idx;
	}

	/* Update CPU boost */
	schedtune_boostgroup_update(st->idx, st->boost);

	trace_sched_tune_config(st->boost);

	return 0;
}
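
/*
 * Example of the boost-to-threshold mapping above (illustrative):
 * boost = 25 gives threshold_idx = clamp(25, 0, 99) / 10 = 2, selecting
 * threshold_gains[2] = { 2, 5 }, while boost = 100 gives
 * clamp(100, 0, 99) / 10 = 9, selecting the most performance-biased entry
 * { 5, 0 }.
 */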

static struct cftype files[] = {
	{
		.name = "boost",
		.read_s64 = boost_read,
		.write_s64 = boost_write,
	},
	{
		.name = "prefer_idle",
		.read_u64 = prefer_idle_read,
		.write_u64 = prefer_idle_write,
	},
	{ }	/* terminate */
};

static int
schedtune_boostgroup_init(struct schedtune *st)
{
	struct boost_groups *bg;
	int cpu;

	/* Keep track of allocated boost groups */
	allocated_group[st->idx] = st;

	/* Initialize the per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);
		bg->group[st->idx].boost = 0;
		bg->group[st->idx].tasks = 0;
		raw_spin_lock_init(&bg->lock);
	}

	return 0;
}

static struct cgroup_subsys_state *
schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct schedtune *st;
	int idx;

	if (!parent_css)
		return &root_schedtune.css;

	/* Allow only single-level hierarchies */
	if (parent_css != &root_schedtune.css) {
		pr_err("Nested SchedTune boosting groups not allowed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Allow only a limited number of boosting groups */
	for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
		if (!allocated_group[idx])
			break;
	if (idx == BOOSTGROUPS_COUNT) {
		pr_err("Trying to create more than %d SchedTune boosting groups\n",
		       BOOSTGROUPS_COUNT);
		return ERR_PTR(-ENOSPC);
	}

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto out;

	/* Initialize per-CPU boost group support */
	st->idx = idx;
	if (schedtune_boostgroup_init(st))
		goto release;

	return &st->css;

release:
	kfree(st);
out:
	return ERR_PTR(-ENOMEM);
}

static void
schedtune_boostgroup_release(struct schedtune *st)
{
	/* Reset this boost group */
	schedtune_boostgroup_update(st->idx, 0);

	/* Keep track of allocated boost groups */
	allocated_group[st->idx] = NULL;
}

static void
schedtune_css_free(struct cgroup_subsys_state *css)
{
	struct schedtune *st = css_st(css);

	schedtune_boostgroup_release(st);
	kfree(st);
}

struct cgroup_subsys schedtune_cgrp_subsys = {
	.css_alloc	= schedtune_css_alloc,
	.css_free	= schedtune_css_free,
	.can_attach	= schedtune_can_attach,
	.cancel_attach	= schedtune_cancel_attach,
	.legacy_cftypes	= files,
	.early_init	= 1,
};
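
/*
 * Illustrative usage of the resulting cgroup interface, assuming the
 * controller is registered under the name "schedtune" (the mount point is an
 * assumption, not defined in this file; Android typically uses /dev/stune):
 *
 *   mount -t cgroup -o schedtune none /dev/stune
 *   mkdir /dev/stune/foreground
 *   echo 15 > /dev/stune/foreground/schedtune.boost
 *   echo 1  > /dev/stune/foreground/schedtune.prefer_idle
 *   echo $TASK_PID > /dev/stune/foreground/tasks
 */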

static inline void
schedtune_init_cgroups(void)
{
	struct boost_groups *bg;
	int cpu;

	/* Initialize the per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);
		memset(bg, 0, sizeof(struct boost_groups));
		raw_spin_lock_init(&bg->lock);
	}

	pr_info("schedtune: configured to support %d boost groups\n",
		BOOSTGROUPS_COUNT);

	schedtune_initialized = true;
}

#else /* CONFIG_CGROUP_SCHEDTUNE */

int
schedtune_accept_deltas(int nrg_delta, int cap_delta,
			struct task_struct *task)
{
	/* Optimal (O) region */
	if (nrg_delta < 0 && cap_delta > 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
		return INT_MAX;
	}

	/* Suboptimal (S) region */
	if (nrg_delta > 0 && cap_delta < 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
		return -INT_MAX;
	}

	return __schedtune_accept_deltas(nrg_delta, cap_delta,
			perf_boost_idx, perf_constrain_idx);
}

#endif /* CONFIG_CGROUP_SCHEDTUNE */

int
sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	unsigned threshold_idx;
	int boost_pct;

	if (ret || !write)
		return ret;

	if (sysctl_sched_cfs_boost < -100 || sysctl_sched_cfs_boost > 100)
		return -EINVAL;
	boost_pct = sysctl_sched_cfs_boost;

	/*
	 * Update threshold params for Performance Boost (B)
	 * and Performance Constraint (C) regions.
	 * The current implementation uses the same cuts for both
	 * B and C regions.
	 */
	threshold_idx = clamp(boost_pct, 0, 99) / 10;
	perf_boost_idx = threshold_idx;
	perf_constrain_idx = threshold_idx;

	return 0;
}

#ifdef CONFIG_SCHED_DEBUG
static void
schedtune_test_nrg(unsigned long delta_pwr)
{
	unsigned long test_delta_pwr;
	unsigned long test_norm_pwr;
	int idx;

	/*
	 * Check normalization constants using some constant system
	 * energy values
	 */
	pr_info("schedtune: verify normalization constants...\n");
	for (idx = 0; idx < 6; ++idx) {
		test_delta_pwr = delta_pwr >> idx;

		/* Normalize on max energy for target platform */
		test_norm_pwr = reciprocal_divide(
			test_delta_pwr << SCHED_CAPACITY_SHIFT,
			schedtune_target_nrg.rdiv);

		pr_info("schedtune: max_pwr/2^%d: %4lu => norm_pwr: %5lu\n",
			idx, test_delta_pwr, test_norm_pwr);
	}
}
#else
#define schedtune_test_nrg(delta_pwr)
#endif

/*
 * Compute the min/max power consumption of a cluster and all its CPUs
 */
static void
schedtune_add_cluster_nrg(
		struct sched_domain *sd,
		struct sched_group *sg,
		struct target_nrg *ste)
{
	struct sched_domain *sd2;
	struct sched_group *sg2;

	struct cpumask *cluster_cpus;
	char str[32];

	unsigned long min_pwr;
	unsigned long max_pwr;
	int cpu;

	/* Get Cluster energy using EM data for the first CPU */
	cluster_cpus = sched_group_cpus(sg);
	snprintf(str, 32, "CLUSTER[%*pbl]",
		 cpumask_pr_args(cluster_cpus));

	min_pwr = sg->sge->idle_states[sg->sge->nr_idle_states - 1].power;
	max_pwr = sg->sge->cap_states[sg->sge->nr_cap_states - 1].power;
	pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
		str, min_pwr, max_pwr);

	/*
	 * Keep track of this cluster's energy in the computation of the
	 * overall system energy
	 */
	ste->min_power += min_pwr;
	ste->max_power += max_pwr;

	/* Get CPU energy using EM data for each CPU in the group */
	for_each_cpu(cpu, cluster_cpus) {
		/* Get a SD view for the specific CPU */
		for_each_domain(cpu, sd2) {
			/* Get the CPU group */
			sg2 = sd2->groups;
			min_pwr = sg2->sge->idle_states[sg2->sge->nr_idle_states - 1].power;
			max_pwr = sg2->sge->cap_states[sg2->sge->nr_cap_states - 1].power;

			ste->min_power += min_pwr;
			ste->max_power += max_pwr;

			snprintf(str, 32, "CPU[%d]", cpu);
			pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
				str, min_pwr, max_pwr);

			/*
			 * Assume we have EM data only at the CPU and
			 * the upper CLUSTER level
			 */
			BUG_ON(!cpumask_equal(
				sched_group_cpus(sg),
				sched_group_cpus(sd2->parent->groups)
				));
			break;
		}
	}
}
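
/*
 * Worked example with made-up EM data: on a 4+4 system where each little CPU
 * spans 10..200mW, each big CPU spans 30..800mW, and the two cluster levels
 * span 5..100mW and 10..300mW respectively, the accumulation above yields:
 *
 *   min_power = 4*10  + 4*30  + 5   + 10  =  175
 *   max_power = 4*200 + 4*800 + 100 + 300 = 4400
 *
 * and schedtune_init() below then derives the normalization constant from
 * reciprocal_value(4400 - 175).
 */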

/*
 * Initialize the constants required to compute normalized energy.
 * The values of these constants depend on the EM data for the specific
 * target system and topology.
 * Thus, this function is expected to be called by the code
 * that binds the EM to the topology information.
 */
static int
schedtune_init(void)
{
	struct target_nrg *ste = &schedtune_target_nrg;
	unsigned long delta_pwr = 0;
	struct sched_domain *sd;
	struct sched_group *sg;

	pr_info("schedtune: init normalization constants...\n");
	ste->max_power = 0;
	ste->min_power = 0;

	rcu_read_lock();

	/*
	 * When EAS is in use, we always have a pointer to the highest SD
	 * which provides EM data.
	 */
	sd = rcu_dereference(per_cpu(sd_ea, cpumask_first(cpu_online_mask)));
	if (!sd) {
		pr_info("schedtune: no energy model data\n");
		goto nodata;
	}

	sg = sd->groups;
	do {
		schedtune_add_cluster_nrg(sd, sg, ste);
	} while (sg = sg->next, sg != sd->groups);

	rcu_read_unlock();

	pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
		"SYSTEM", ste->min_power, ste->max_power);

	/* Compute normalization constants */
	delta_pwr = ste->max_power - ste->min_power;
	ste->rdiv = reciprocal_value(delta_pwr);
	pr_info("schedtune: using normalization constants mul: %u sh1: %u sh2: %u\n",
		ste->rdiv.m, ste->rdiv.sh1, ste->rdiv.sh2);

	schedtune_test_nrg(delta_pwr);

#ifdef CONFIG_CGROUP_SCHEDTUNE
	schedtune_init_cgroups();
#else
	pr_info("schedtune: configured to support global boosting only\n");
#endif

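	/*
	 * Pre-compute a reciprocal for dividing by 100. A sketch of the kind
	 * of percentage scaling this enables for users outside this file
	 * (illustrative, not the exact callers' code):
	 *
	 *   margin = reciprocal_divide(boost_pct * headroom, schedtune_spc_rdiv);
	 *
	 * i.e. headroom * boost_pct / 100 without a runtime division.
	 */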
	schedtune_spc_rdiv = reciprocal_value(100);

	return 0;

nodata:
	pr_warning("schedtune: disabled!\n");
	rcu_read_unlock();
	return -EINVAL;
}
postcore_initcall(schedtune_init);