sched: Get sched_task_util trace point working for !SCHED_WALT

The sched_task_util trace point depends on the task's mark_start to
calculate the time taken by the CPU selection algorithm in
select_energy_cpu_brute(). This results in a compilation error, since
mark_start is not available when SCHED_WALT is disabled.

The sched_task_util trace point is not tied to WALT. Fix this by using
sched_clock() instead of mark_start. sched_clock() is read only when
the trace point is enabled, so there is no additional overhead when
tracing is off.
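
For reference, the guarded-read pattern in select_energy_cpu_brute()
and the matching latency computation in the trace point reduce to the
abbreviated sketch below (identifiers as in this patch, surrounding
selection code omitted):

	u64 start_t = 0;

	/* static-key test generated for the trace point; no cost when off */
	if (trace_sched_task_util_enabled())
		start_t = sched_clock();

	/* ... run the CPU selection algorithm ... */

	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
			      fbt_env.need_idle, fbt_env.placement_boost,
			      rtg_target ? cpumask_first(rtg_target) : -1,
			      start_t);

	/* in TP_fast_assign: __entry->latency = sched_clock() - start_t; */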

Change-Id: Ide67741a188da13911929422c0bb1b5af2d29826
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 23a3b9a..4ad7cbd 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -714,10 +714,10 @@
 
 	TP_PROTO(struct task_struct *p, int next_cpu, int backup_cpu,
 		 int target_cpu, bool sync, bool need_idle,
-		 bool placement_boost, int rtg_cpu),
+		 bool placement_boost, int rtg_cpu, u64 start_t),
 
 	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle,
-		placement_boost, rtg_cpu),
+		placement_boost, rtg_cpu, start_t),
 
 	TP_STRUCT__entry(
 		__field(int, pid			)
@@ -746,9 +746,7 @@
 		__entry->need_idle		= need_idle;
 		__entry->placement_boost	= placement_boost;
 		__entry->rtg_cpu		= rtg_cpu;
-		__entry->latency		= p->ravg.mark_start ?
-						  ktime_get_ns() -
-						  p->ravg.mark_start : 0;
+		__entry->latency		= (sched_clock() - start_t);
 	),
 
 	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d placement_boost=%d rtg_cpu=%d latency=%llu",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 744b535..a4a14d2d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7303,6 +7303,10 @@
 	int next_cpu = -1;
 	struct cpumask *rtg_target = find_rtg_target(p);
 	struct find_best_target_env fbt_env;
+	u64 start_t = 0;
+
+	if (trace_sched_task_util_enabled())
+		start_t = sched_clock();
 
 	schedstat_inc(p->se.statistics.nr_wakeups_secb_attempts);
 	schedstat_inc(this_rq()->eas_stats.secb_attempts);
@@ -7427,7 +7431,8 @@
 unlock:
 	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
 			      fbt_env.need_idle, fbt_env.placement_boost,
-			      rtg_target ? cpumask_first(rtg_target) : -1);
+			      rtg_target ? cpumask_first(rtg_target) : -1,
+			      start_t);
 	rcu_read_unlock();
 	return target_cpu;
 }