sched: trace bias-to-waker and bias-to-prev-cpu fastpaths

Add a fastpath field to the sched_task_util trace event so that task
placements which short-circuit the energy-aware search can be
identified: SYNC_WAKEUP (1) when the task is biased to the waker's
CPU, PREV_CPU_BIAS (2) when it is biased to its previous CPU, and
0 (NONE) when the full search ran. Route the fastpath exits through
a common out label so they are traced as well, and zero-initialize
fbt_env so those exits do not report uninitialized need_idle or
placement_boost values.
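
With the event enabled, a resulting trace line would look like the
following (field values illustrative):

  sched_task_util: pid=1234 comm=surfaceflinger util=97 prev_cpu=0 next_cpu=4 backup_cpu=-1 target_cpu=4 sync=1 need_idle=0 fastpath=1 placement_boost=0 rtg_cpu=-1 latency=1830

The event is enabled through the standard ftrace interface, assuming
tracefs is mounted at the usual debugfs location:

  echo 1 > /sys/kernel/debug/tracing/events/sched/sched_task_util/enable
  cat /sys/kernel/debug/tracing/trace_pipe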

Change-Id: I12bb1a6b93eb8dd15f0ecc5f14904b2fd4655fda
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e06df4d..b355ebf 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -713,10 +713,10 @@
 TRACE_EVENT(sched_task_util,
 
 	TP_PROTO(struct task_struct *p, int next_cpu, int backup_cpu,
-		 int target_cpu, bool sync, bool need_idle,
+		 int target_cpu, bool sync, bool need_idle, int fastpath,
 		 bool placement_boost, int rtg_cpu, u64 start_t),
 
-	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle,
+	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle, fastpath,
 		placement_boost, rtg_cpu, start_t),
 
 	TP_STRUCT__entry(
@@ -729,6 +729,7 @@
 		__field(int, target_cpu			)
 		__field(bool, sync			)
 		__field(bool, need_idle			)
+		__field(int, fastpath			)
 		__field(bool, placement_boost		)
 		__field(int, rtg_cpu			)
 		__field(u64, latency			)
@@ -744,13 +745,14 @@
 		__entry->target_cpu		= target_cpu;
 		__entry->sync			= sync;
 		__entry->need_idle		= need_idle;
+		__entry->fastpath		= fastpath;
 		__entry->placement_boost	= placement_boost;
 		__entry->rtg_cpu		= rtg_cpu;
 		__entry->latency		= (sched_clock() - start_t);
 	),
 
-	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d placement_boost=%d rtg_cpu=%d latency=%llu",
-		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu, __entry->next_cpu, __entry->backup_cpu, __entry->target_cpu, __entry->sync, __entry->need_idle, __entry->placement_boost, __entry->rtg_cpu, __entry->latency)
+	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d rtg_cpu=%d latency=%llu",
+		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu, __entry->next_cpu, __entry->backup_cpu, __entry->target_cpu, __entry->sync, __entry->need_idle, __entry->fastpath, __entry->placement_boost, __entry->rtg_cpu, __entry->latency)
 );
 
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 04ba6d0..112aeaa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7357,6 +7357,13 @@
 }
 #endif
 
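+/* Reasons recorded when task placement skips the full energy-aware search */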
+enum fastpaths {
+	NONE = 0,
+	SYNC_WAKEUP,
+	PREV_CPU_BIAS,
+};
+
 static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 {
 	bool boosted, prefer_idle;
@@ -7367,6 +7374,7 @@
 	struct cpumask *rtg_target = find_rtg_target(p);
-	struct find_best_target_env fbt_env;
+	struct find_best_target_env fbt_env = {0};
 	u64 start_t = 0;
+	int fastpath = NONE;
 
 	if (trace_sched_task_util_enabled())
 		start_t = sched_clock();
@@ -7403,12 +7411,17 @@
 		if (bias_to_waker_cpu(p, cpu, rtg_target)) {
 			schedstat_inc(p->se.statistics.nr_wakeups_secb_sync);
 			schedstat_inc(this_rq()->eas_stats.secb_sync);
-			return cpu;
+			target_cpu = cpu;
+			fastpath = SYNC_WAKEUP;
+			goto out;
 		}
 	}
 
-	if (bias_to_prev_cpu(p, rtg_target))
-		return prev_cpu;
+	if (bias_to_prev_cpu(p, rtg_target)) {
+		target_cpu = prev_cpu;
+		fastpath = PREV_CPU_BIAS;
+		goto out;
+	}
 
 	rcu_read_lock();
 
@@ -7495,11 +7508,14 @@
 	schedstat_inc(this_rq()->eas_stats.secb_count);
 
 unlock:
-	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
-			      fbt_env.need_idle, fbt_env.placement_boost,
-			      rtg_target ? cpumask_first(rtg_target) : -1,
-			      start_t);
 	rcu_read_unlock();
+out:
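+	/* The sync-wakeup and prev-cpu-bias fastpaths jump straight here */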
+	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
+			      fbt_env.need_idle, fastpath,
+			      fbt_env.placement_boost,
+			      rtg_target ? cpumask_first(rtg_target) : -1,
+			      start_t);
 	return target_cpu;
 }