cpufreq: interactive: Pass target_load to scheduler
The scheduler needs to know the governor's per-frequency target load in order
to make correct decisions when placing tasks. Expose it through a new
get_freq_max_load() interface and call sched_update_freq_max_load() whenever
the set of governed CPUs or their target loads change.
Change-Id: Ia440986de813632def0352e34425fa69da3b2923
Signed-off-by: Junjie Wu <junjiew@codeaurora.org>
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 4d09138..a909317 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -76,6 +76,7 @@
static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;
+static cpumask_t controlled_cpus;
/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
@@ -313,6 +314,25 @@
return ret;
}
+#define DEFAULT_MAX_LOAD 100
+u32 get_freq_max_load(int cpu, unsigned int freq)
+{
+ struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+
+ if (!cpumask_test_cpu(cpu, &controlled_cpus))
+ return DEFAULT_MAX_LOAD;
+
+ if (have_governor_per_policy()) {
+ if (!ppol || !ppol->cached_tunables)
+ return DEFAULT_MAX_LOAD;
+ return freq_to_targetload(ppol->cached_tunables, freq);
+ }
+
+ if (!cached_common_tunables)
+ return DEFAULT_MAX_LOAD;
+ return freq_to_targetload(cached_common_tunables, freq);
+}
+
/*
* If increasing frequencies never map to a lower target load then
* choose_freq() will find the minimum frequency that does not exceed its
@@ -874,6 +894,9 @@
tunables->target_loads = new_target_loads;
tunables->ntarget_loads = ntokens;
spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+
+ sched_update_freq_max_load(&controlled_cpus);
+
return count;
}
@@ -1571,6 +1594,9 @@
WARN_ON(tunables);
} else if (tunables) {
tunables->usage_count++;
+ cpumask_or(&controlled_cpus, &controlled_cpus,
+ policy->related_cpus);
+ sched_update_freq_max_load(policy->related_cpus);
policy->governor_data = tunables;
return 0;
}
@@ -1604,6 +1630,10 @@
if (tunables->use_sched_load)
cpufreq_interactive_enable_sched_input(tunables);
+ cpumask_or(&controlled_cpus, &controlled_cpus,
+ policy->related_cpus);
+ sched_update_freq_max_load(policy->related_cpus);
+
if (have_governor_per_policy())
ppol->cached_tunables = tunables;
else
@@ -1612,6 +1642,9 @@
break;
case CPUFREQ_GOV_POLICY_EXIT:
+ cpumask_andnot(&controlled_cpus, &controlled_cpus,
+ policy->related_cpus);
+ sched_update_freq_max_load(cpu_possible_mask);
if (!--tunables->usage_count) {
if (policy->governor->initialized == 1)
cpufreq_unregister_notifier(&cpufreq_notifier_block,