cpufreq: governors: Remove code redundancy between governors

With the inclusion of the following patches:

9f4eb10 cpufreq: conservative: call dbs_check_cpu only when necessary
772b4b1 cpufreq: ondemand: call dbs_check_cpu only when necessary

code redundancy between the conservative and ondemand governors is
introduced again, so get rid of it.

[rjw: Changelog]
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Fabio Baltieri <fabio.baltieri@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
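
Note: the per-governor timer coordination removed below is replaced by a
call to a common need_load_eval() helper in the shared governor code,
which this excerpt of the patch does not show.  As a rough sketch only,
based on the od_timer_coordinated() logic deleted in the hunk below (the
struct name cpu_dbs_common_info and the exact placement in
drivers/cpufreq/cpufreq_governor.c are inferred, not shown here), such a
helper could look like:

	/*
	 * Sketch of the shared helper called by the new od_dbs_timer():
	 * for a shared policy, skip load evaluation if we sampled within
	 * the last half sampling period; otherwise refresh the timestamp.
	 */
	bool need_load_eval(struct cpu_dbs_common_info *cdbs,
			    unsigned int sampling_rate)
	{
		if (policy_is_shared(cdbs->cur_policy)) {
			ktime_t time_now = ktime_get();
			s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

			/* Do nothing if we recently have sampled */
			if (delta_us < (s64)(sampling_rate / 2))
				return false;

			cdbs->time_stamp = time_now;
		}

		return true;
	}

Both governors can then share this check instead of each carrying its own
copy of the coordination code, which is what the hunk below does for
ondemand (the conservative governor gets the equivalent change in the
full patch).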
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 75efd5e..f38b8da 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -216,75 +216,44 @@
 	}
 }
 
-static void od_timer_update(struct od_cpu_dbs_info_s *dbs_info, bool sample,
-			    struct delayed_work *dw)
-{
-	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	int delay, sample_type = dbs_info->sample_type;
-
-	/* Common NORMAL_SAMPLE setup */
-	dbs_info->sample_type = OD_NORMAL_SAMPLE;
-	if (sample_type == OD_SUB_SAMPLE) {
-		delay = dbs_info->freq_lo_jiffies;
-		if (sample)
-			__cpufreq_driver_target(dbs_info->cdbs.cur_policy,
-						dbs_info->freq_lo,
-						CPUFREQ_RELATION_H);
-	} else {
-		if (sample)
-			dbs_check_cpu(&od_dbs_data, cpu);
-		if (dbs_info->freq_lo) {
-			/* Setup timer for SUB_SAMPLE */
-			dbs_info->sample_type = OD_SUB_SAMPLE;
-			delay = dbs_info->freq_hi_jiffies;
-		} else {
-			delay = delay_for_sampling_rate(od_tuners.sampling_rate
-						* dbs_info->rate_mult);
-		}
-	}
-
-	schedule_delayed_work_on(smp_processor_id(), dw, delay);
-}
-
-static void od_timer_coordinated(struct od_cpu_dbs_info_s *dbs_info_local,
-				 struct delayed_work *dw)
-{
-	struct od_cpu_dbs_info_s *dbs_info;
-	ktime_t time_now;
-	s64 delta_us;
-	bool sample = true;
-
-	/* use leader CPU's dbs_info */
-	dbs_info = &per_cpu(od_cpu_dbs_info,
-			    dbs_info_local->cdbs.cur_policy->cpu);
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
-	time_now = ktime_get();
-	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-
-	/* Do nothing if we recently have sampled */
-	if (delta_us < (s64)(od_tuners.sampling_rate / 2))
-		sample = false;
-	else
-		dbs_info->cdbs.time_stamp = time_now;
-
-	od_timer_update(dbs_info, sample, dw);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
-}
-
 static void od_dbs_timer(struct work_struct *work)
 {
 	struct delayed_work *dw = to_delayed_work(work);
 	struct od_cpu_dbs_info_s *dbs_info =
 		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
+	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+			cpu);
+	int delay, sample_type = core_dbs_info->sample_type;
+	bool eval_load;
 
-	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
-		od_timer_coordinated(dbs_info, dw);
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	eval_load = need_load_eval(&core_dbs_info->cdbs,
+			od_tuners.sampling_rate);
+
+	/* Common NORMAL_SAMPLE setup */
+	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+	if (sample_type == OD_SUB_SAMPLE) {
+		delay = core_dbs_info->freq_lo_jiffies;
+		if (eval_load)
+			__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+						core_dbs_info->freq_lo,
+						CPUFREQ_RELATION_H);
 	} else {
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
-		od_timer_update(dbs_info, true, dw);
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
+		if (eval_load)
+			dbs_check_cpu(&od_dbs_data, cpu);
+		if (core_dbs_info->freq_lo) {
+			/* Setup timer for SUB_SAMPLE */
+			core_dbs_info->sample_type = OD_SUB_SAMPLE;
+			delay = core_dbs_info->freq_hi_jiffies;
+		} else {
+			delay = delay_for_sampling_rate(od_tuners.sampling_rate
+						* core_dbs_info->rate_mult);
+		}
 	}
+
+	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
 /************************** sysfs interface ************************/