sched: use cum_window_demand for rt task placement
Use the cumulative window demand (cpu_util_cum plus the waking task's
demand) to estimate CPU load, and introduce C-state awareness to RT
task placement to minimize idle CPU wakeups.
Change-Id: I72ab2598f58980b24670e0a44dad61d1a484c4ae
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a4ee65c..c0db644 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1824,6 +1824,18 @@
}
#endif /* CONFIG_SCHED_HMP */
+static inline unsigned long task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_task_util) {
+ u64 demand = p->ravg.demand;
+
+ return (demand << 10) / sched_ravg_window;
+ }
+#endif
+ return p->se.avg.util_avg;
+}
+
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;
@@ -1834,6 +1846,11 @@
struct cpumask search_cpu, backup_search_cpu;
unsigned long cpu_capacity, capacity = ULONG_MAX;
unsigned long util, best_cpu_util = ULONG_MAX;
+ int best_cpu_idle_idx = INT_MAX;
+ int cpu_idle_idx = -1;
+ long new_util_cum;
+ int max_spare_cap_cpu = -1;
+ long max_spare_cap = -LONG_MAX;
#ifdef CONFIG_SCHED_HMP
return find_lowest_rq_hmp(task);
@@ -1888,17 +1905,51 @@
if (sched_cpu_high_irqload(cpu))
continue;
- if (best_cpu_util > util ||
- (best_cpu_util == util &&
- cpu == task_cpu(task))) {
- best_cpu_util = util;
- best_cpu = cpu;
+ new_util_cum = cpu_util_cum(cpu, 0);
+
+ if (!task_in_cum_window_demand(cpu_rq(cpu),
+ task))
+ new_util_cum += task_util(task);
+
+ trace_sched_cpu_util(task, cpu, task_util(task),
+ 0, new_util_cum, 0);
+
+ if (sysctl_sched_cstate_aware)
+ cpu_idle_idx =
+ (cpu == smp_processor_id() ||
+ cpu_rq(cpu)->nr_running) ?
+ -1 :
+ idle_get_state_idx(cpu_rq(cpu));
+
+ if (add_capacity_margin(new_util_cum) <
+ capacity_curr_of(cpu)) {
+ if (cpu_idle_idx < best_cpu_idle_idx ||
+ (best_cpu != task_cpu(task) &&
+ (best_cpu_idle_idx ==
+ cpu_idle_idx &&
+ best_cpu_util > util))) {
+ best_cpu_util = util;
+ best_cpu = cpu;
+ best_cpu_idle_idx =
+ cpu_idle_idx;
+ }
+ } else {
+ long spare_cap = capacity_of(cpu) -
+ util;
+
+ if (spare_cap > 0 &&
+ max_spare_cap < spare_cap) {
+ max_spare_cap_cpu = cpu;
+ max_spare_cap = spare_cap;
+ }
}
}
}
if (best_cpu != -1) {
return best_cpu;
+ } else if (max_spare_cap_cpu != -1) {
+ return max_spare_cap_cpu;
} else if (!cpumask_empty(&backup_search_cpu)) {
cpumask_copy(&search_cpu, &backup_search_cpu);
cpumask_clear(&backup_search_cpu);