cpufreq: interactive: Use sched_get_cpus_busy() to query busy time
sched_get_cpus_busy() provides a snapshot of all CPUs' busy time
information for the set of CPUs being queried. This avoids a race
condition due to migration when CPU load is queried one by one.
Change-Id: I6afdfa74ff9f3ef616872df4e2c3bb04f6233c3f
Signed-off-by: Junjie Wu <junjiew@codeaurora.org>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 31151f0..7a26297 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -52,6 +52,7 @@
bool reject_notification;
int governor_enabled;
struct cpufreq_interactive_tunables *cached_tunables;
+ unsigned long *cpu_busy_times;
};
/* Protected by per-policy load_lock */
@@ -442,7 +443,7 @@
unsigned int index;
unsigned long flags;
unsigned long max_cpu;
- int i;
+ int i, fcpu;
struct cpufreq_govinfo govinfo;
if (!down_read_trylock(&ppol->enable_sem))
@@ -450,16 +451,20 @@
if (!ppol->governor_enabled)
goto exit;
+ fcpu = cpumask_first(ppol->policy->related_cpus);
now = ktime_to_us(ktime_get());
spin_lock_irqsave(&ppol->load_lock, flags);
ppol->last_evaluated_jiffy = get_jiffies_64();
+ if (tunables->use_sched_load)
+ sched_get_cpus_busy(ppol->cpu_busy_times,
+ ppol->policy->related_cpus);
max_cpu = cpumask_first(ppol->policy->cpus);
for_each_cpu(i, ppol->policy->cpus) {
pcpu = &per_cpu(cpuinfo, i);
if (tunables->use_sched_load) {
- cputime_speedadj = (u64)sched_get_busy(i) *
- ppol->policy->cpuinfo.max_freq;
+ cputime_speedadj = (u64)ppol->cpu_busy_times[i - fcpu]
+ * ppol->policy->cpuinfo.max_freq;
do_div(cputime_speedadj, tunables->timer_rate);
} else {
now = update_load(i);
@@ -1467,6 +1472,7 @@
struct cpufreq_interactive_policyinfo *ppol =
per_cpu(polinfo, policy->cpu);
int i;
+ unsigned long *busy;
/* polinfo already allocated for policy, return */
if (ppol)
@@ -1476,6 +1482,14 @@
if (!ppol)
return ERR_PTR(-ENOMEM);
+ busy = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*busy),
+ GFP_KERNEL);
+ if (!busy) {
+ kfree(ppol);
+ return ERR_PTR(-ENOMEM);
+ }
+ ppol->cpu_busy_times = busy;
+
init_timer_deferrable(&ppol->policy_timer);
ppol->policy_timer.function = cpufreq_interactive_timer;
init_timer(&ppol->policy_slack_timer);
@@ -1502,6 +1516,7 @@
if (per_cpu(polinfo, j) == ppol)
per_cpu(polinfo, cpu) = NULL;
kfree(ppol->cached_tunables);
+ kfree(ppol->cpu_busy_times);
kfree(ppol);
}