cpufreq: interactive: adjust load for changes in speed

Add a notifier for speed transitions.  Keep a running total of CPU-active
microseconds multiplied by the frequency in effect while they accumulated,
and convert that total to a percentage of the current target frequency
when load is evaluated.
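
For example (illustrative numbers): if a CPU is fully active for 5 ms at
500 MHz and then fully active for 5 ms at 1 GHz, the accumulated
cputime_speedadj divided by delta_time averages to 750 MHz; evaluated
against a 1 GHz target_freq this yields a load of 75%, where the old
unweighted calculation would have reported 100%.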

Change-Id: I5c27adb11081c50490219784ca57cc46e97fc28c
Signed-off-by: Todd Poynor <toddpoynor@google.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 3d8e7b4..d0d51ee 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -41,8 +41,11 @@
 struct cpufreq_interactive_cpuinfo {
 	struct timer_list cpu_timer;
 	int timer_idlecancel;
+	spinlock_t load_lock; /* protects the next 4 fields */
 	u64 time_in_idle;
 	u64 time_in_idle_timestamp;
+	u64 cputime_speedadj;
+	u64 cputime_speedadj_timestamp;
 	struct cpufreq_policy *policy;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int target_freq;
@@ -121,9 +124,13 @@
 {
 	mod_timer_pinned(&pcpu->cpu_timer,
 			 jiffies + usecs_to_jiffies(timer_rate));
+	spin_lock(&pcpu->load_lock);
 	pcpu->time_in_idle =
 		get_cpu_idle_time_us(smp_processor_id(),
 				     &pcpu->time_in_idle_timestamp);
+	pcpu->cputime_speedadj = 0;
+	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+	spin_unlock(&pcpu->load_lock);
 }
 
 static unsigned int freq_to_targetload(unsigned int freq)
@@ -148,10 +155,9 @@
  */
 
 static unsigned int choose_freq(
-	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int curload)
+	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
 {
 	unsigned int freq = pcpu->policy->cur;
-	unsigned int loadadjfreq = freq * curload;
 	unsigned int prevfreq, freqmin, freqmax;
 	unsigned int tl;
 	int index;
@@ -230,16 +236,37 @@
 	return freq;
 }
 
+static u64 update_load(int cpu)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+	u64 now;
+	u64 now_idle;
+	unsigned int delta_idle;
+	unsigned int delta_time;
+	u64 active_time;
+
+	now_idle = get_cpu_idle_time_us(cpu, &now);
+	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+	active_time = delta_time - delta_idle;
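+	/* Accumulate active time scaled by the current speed (usec * kHz). */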
+	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+	pcpu->time_in_idle = now_idle;
+	pcpu->time_in_idle_timestamp = now;
+	return now;
+}
+
 static void cpufreq_interactive_timer(unsigned long data)
 {
 	u64 now;
-	unsigned int delta_idle;
 	unsigned int delta_time;
+	u64 cputime_speedadj;
 	int cpu_load;
 	struct cpufreq_interactive_cpuinfo *pcpu =
 		&per_cpu(cpuinfo, data);
-	u64 now_idle;
 	unsigned int new_freq;
+	unsigned int loadadjfreq;
 	unsigned int index;
 	unsigned long flags;
 
@@ -248,26 +274,24 @@
 	if (!pcpu->governor_enabled)
 		goto exit;
 
-	now_idle = get_cpu_idle_time_us(data, &now);
-	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
-	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+	spin_lock(&pcpu->load_lock);
+	now = update_load(data);
+	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+	cputime_speedadj = pcpu->cputime_speedadj;
+	spin_unlock(&pcpu->load_lock);
 
-	/*
-	 * If timer ran less than 1ms after short-term sample started, retry.
-	 */
-	if (delta_time < 1000)
+	if (WARN_ON_ONCE(!delta_time))
 		goto rearm;
 
-	if (delta_idle > delta_time)
-		cpu_load = 0;
-	else
-		cpu_load = 100 * (delta_time - delta_idle) / delta_time;
+	do_div(cputime_speedadj, delta_time);
+	loadadjfreq = (unsigned int)cputime_speedadj * 100;
+	cpu_load = loadadjfreq / pcpu->target_freq;
 
 	if ((cpu_load >= go_hispeed_load || boost_val) &&
 	    pcpu->target_freq < hispeed_freq)
 		new_freq = hispeed_freq;
 	else
-		new_freq = choose_freq(pcpu, cpu_load);
+		new_freq = choose_freq(pcpu, loadadjfreq);
 
 	if (pcpu->target_freq >= hispeed_freq &&
 	    new_freq > pcpu->target_freq &&
@@ -498,6 +522,33 @@
 		wake_up_process(speedchange_task);
 }
 
+static int cpufreq_interactive_notifier(
+	struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	int cpu;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		pcpu = &per_cpu(cpuinfo, freq->cpu);
+
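+		/* Update the freq-weighted load totals of each CPU in this policy. */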
+		for_each_cpu(cpu, pcpu->policy->cpus) {
+			struct cpufreq_interactive_cpuinfo *pjcpu =
+				&per_cpu(cpuinfo, cpu);
+			spin_lock(&pjcpu->load_lock);
+			update_load(cpu);
+			spin_unlock(&pjcpu->load_lock);
+		}
+	}
+
+	return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+	.notifier_call = cpufreq_interactive_notifier,
+};
+
 static ssize_t show_target_loads(
 	struct kobject *kobj, struct attribute *attr, char *buf)
 {
@@ -817,6 +867,8 @@
 			return rc;
 
 		idle_notifier_register(&cpufreq_interactive_idle_nb);
+		cpufreq_register_notifier(
+			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
 		break;
 
 	case CPUFREQ_GOV_STOP:
@@ -830,6 +882,8 @@
 		if (atomic_dec_return(&active_count) > 0)
 			return 0;
 
+		cpufreq_unregister_notifier(
+			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
 		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
 		sysfs_remove_group(cpufreq_global_kobject,
 				&interactive_attr_group);
@@ -868,6 +922,7 @@
 			init_timer_deferrable(&pcpu->cpu_timer);
 		pcpu->cpu_timer.function = cpufreq_interactive_timer;
 		pcpu->cpu_timer.data = i;
+		spin_lock_init(&pcpu->load_lock);
 	}
 
 	spin_lock_init(&target_loads_lock);