Merge "Merge remote-tracking branch 'dev/msm-4.9-sched' into msm-4.9"
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 23a3b9a..4ad7cbd 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -714,10 +714,10 @@
 
 	TP_PROTO(struct task_struct *p, int next_cpu, int backup_cpu,
 		 int target_cpu, bool sync, bool need_idle,
-		 bool placement_boost, int rtg_cpu),
+		 bool placement_boost, int rtg_cpu, u64 start_t),
 
 	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle,
-		placement_boost, rtg_cpu),
+		placement_boost, rtg_cpu, start_t),
 
 	TP_STRUCT__entry(
 		__field(int, pid			)
@@ -746,9 +746,7 @@
 		__entry->need_idle		= need_idle;
 		__entry->placement_boost	= placement_boost;
 		__entry->rtg_cpu		= rtg_cpu;
-		__entry->latency		= p->ravg.mark_start ?
-						  ktime_get_ns() -
-						  p->ravg.mark_start : 0;
+		__entry->latency		= (sched_clock() - start_t);
 	),
 
 	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d placement_boost=%d rtg_cpu=%d latency=%llu",
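The tracepoint now takes its timestamp from the caller and computes the latency at trace time, rather than deriving it from the WALT-specific p->ravg.mark_start, which does not exist without CONFIG_SCHED_WALT and can legitimately be zero. A caller follows the pattern of the select_energy_cpu_brute() change later in this patch:

	u64 start_t = 0;

	/* Sample the clock only when someone is actually tracing. */
	if (trace_sched_task_util_enabled())
		start_t = sched_clock();

	/* ... CPU placement runs here ... */

	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
			      need_idle, placement_boost, rtg_cpu, start_t);

One caveat of the scheme: if tracing is switched on between the sample point and the trace call, start_t is still 0 and the latency reported for that single event is meaningless.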
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 744b535..9f1d1d0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6814,12 +6814,33 @@
 	bool avoid_prev_cpu;
 };
 
+#ifdef CONFIG_SCHED_WALT
+static unsigned long cpu_estimated_capacity(int cpu, struct task_struct *p)
+{
+	unsigned long tutil, estimated_capacity;
+
+	if (task_in_cum_window_demand(cpu_rq(cpu), p))
+		tutil = 0;
+	else
+		tutil = task_util(p);
+
+	estimated_capacity = cpu_util_cum(cpu, tutil);
+
+	return estimated_capacity;
+}
+#else
+static unsigned long cpu_estimated_capacity(int cpu, struct task_struct *p)
+{
+	return cpu_util_wake(cpu, p);
+}
+#endif
+
 static bool is_packing_eligible(struct task_struct *p, int target_cpu,
 				struct find_best_target_env *fbt_env,
 				unsigned int target_cpus_count,
 				int best_idle_cstate)
 {
-	unsigned long tutil, estimated_capacity;
+	unsigned long estimated_capacity;
 
 	if (fbt_env->placement_boost || fbt_env->need_idle)
 		return false;
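The two variants differ only in how much of p's demand is counted. Under WALT, cpu_util_cum() reports the CPU's cumulative window demand, and the task's own utilization is added back only when it is not already part of that window (task_in_cum_window_demand()), since it would otherwise be counted twice. Without WALT there is no cumulative window, so cpu_util_wake(), which already discounts p's own contribution on its previous CPU, is the natural fallback. Condensed:

	/* CONFIG_SCHED_WALT: cumulative window demand, plus p when absent */
	tutil = task_in_cum_window_demand(cpu_rq(cpu), p) ? 0 : task_util(p);
	util  = cpu_util_cum(cpu, tutil);

	/* !CONFIG_SCHED_WALT: PELT utilization minus p's own contribution */
	util = cpu_util_wake(cpu, p);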
@@ -6830,12 +6851,7 @@
 	if (target_cpus_count != 1)
 		return true;
 
-	if (task_in_cum_window_demand(cpu_rq(target_cpu), p))
-		tutil = 0;
-	else
-		tutil = task_util(p);
-
-	estimated_capacity = cpu_util_cum(target_cpu, tutil);
+	estimated_capacity = cpu_estimated_capacity(target_cpu, p);
 	estimated_capacity = add_capacity_margin(estimated_capacity,
 						 target_cpu);
 
@@ -6874,6 +6890,7 @@
 	return walt_start_cpu(start_cpu);
 }
 
+unsigned int sched_smp_overlap_capacity;
 static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 				   bool boosted, bool prefer_idle,
 				   struct find_best_target_env *fbt_env)
@@ -7273,6 +7290,7 @@
 	       task_fits_max(p, cpu);
 }
 
+#ifdef CONFIG_SCHED_WALT
 static inline struct cpumask *find_rtg_target(struct task_struct *p)
 {
 	struct related_thread_group *grp;
@@ -7293,6 +7311,12 @@
 
 	return rtg_target;
 }
+#else
+static inline struct cpumask *find_rtg_target(struct task_struct *p)
+{
+	return NULL;
+}
+#endif
 
 static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 {
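The !CONFIG_SCHED_WALT stub returns NULL so that callers such as select_energy_cpu_brute() need no #ifdef of their own; every rtg_target use is already NULL-tolerant and the compiler folds the dead branches away. The trace argument updated at the end of this patch is typical:

	/* resolves to -1 (no related-thread-group CPU) with the stub */
	rtg_target ? cpumask_first(rtg_target) : -1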
@@ -7303,6 +7327,10 @@
 	int next_cpu = -1;
 	struct cpumask *rtg_target = find_rtg_target(p);
 	struct find_best_target_env fbt_env;
+	u64 start_t = 0;
+
+	if (trace_sched_task_util_enabled())
+		start_t = sched_clock();
 
 	schedstat_inc(p->se.statistics.nr_wakeups_secb_attempts);
 	schedstat_inc(this_rq()->eas_stats.secb_attempts);
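trace_sched_task_util_enabled() is one of the inline helpers TRACE_EVENT() generates; it is backed by a static key, so with tracing off this guard costs a patched-out branch instead of a sched_clock() read on every wakeup. The same pattern suits any tracepoint that wants to report its own elapsed time (trace_foo is hypothetical, shown only to illustrate the idiom):

	u64 ts = 0;

	if (trace_foo_enabled())	/* static-key test, near-free when off */
		ts = sched_clock();
	/* ... work to be measured ... */
	trace_foo(arg, ts);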
@@ -7427,7 +7455,8 @@
 unlock:
 	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
 			      fbt_env.need_idle, fbt_env.placement_boost,
-			      rtg_target ? cpumask_first(rtg_target) : -1);
+			      rtg_target ? cpumask_first(rtg_target) : -1,
+			      start_t);
 	rcu_read_unlock();
 	return target_cpu;
 }
@@ -10871,6 +10900,24 @@
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_WALT
+static inline void
+walt_update_misfit_task(struct rq *rq, struct task_struct *curr)
+{
+	bool misfit = rq->misfit_task;
+
+	if (curr->misfit != misfit) {
+		walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
+		curr->misfit = misfit;
+	}
+}
+#else
+static inline void
+walt_update_misfit_task(struct rq *rq, struct task_struct *curr)
+{
+}
+#endif
+
 /*
  * scheduler tick hitting a task of our scheduling class:
  */
@@ -10878,10 +10925,6 @@
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &curr->se;
-#ifdef CONFIG_SMP
-	bool old_misfit = curr->misfit;
-	bool misfit;
-#endif
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
@@ -10897,15 +10940,9 @@
 		trace_sched_overutilized(true);
 	}
 
-	misfit = !task_fits_max(curr, rq->cpu);
-	rq->misfit_task = misfit;
-
-	if (old_misfit != misfit) {
-		walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
-		curr->misfit = misfit;
-	}
+	rq->misfit_task = !task_fits_max(curr, rq->cpu);
 #endif
-
+	walt_update_misfit_task(rq, curr);
 }
 
 /*
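After this refactor the tick path always refreshes rq->misfit_task under CONFIG_SMP, while the per-task bookkeeping (curr->misfit and the per-rq big-task count maintained by walt_fixup_nr_big_tasks()) moves into the new WALT-only helper. On a WALT kernel the net behavior matches the old inline code:

	misfit = !task_fits_max(curr, rq->cpu);
	rq->misfit_task = misfit;
	if (curr->misfit != misfit) {
		walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
		curr->misfit = misfit;
	}

Note that walt_update_misfit_task() is now called after the #endif, i.e. outside what appears to be the CONFIG_SMP block, but its !WALT variant is empty, so builds without WALT still compile this to nothing.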
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8329e9c..4932f1d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1112,6 +1112,11 @@
 	SCHED_BOOST_ON_ALL,
 };
 
+#define NO_BOOST 0
+#define FULL_THROTTLE_BOOST 1
+#define CONSERVATIVE_BOOST 2
+#define RESTRAINED_BOOST 3
+
 /*
  * Returns the rq capacity of any rq in a group. This does not play
  * well with groups where rq capacity can change independently.
@@ -1910,6 +1915,9 @@
 	return cpu_util_freq_pelt(cpu);
 }
 
+#define sched_ravg_window TICK_NSEC
+#define sysctl_sched_use_walt_cpu_util 0
+
 #endif /* CONFIG_SCHED_WALT */
 
 extern unsigned long
@@ -2367,11 +2375,6 @@
 extern void add_new_task_to_grp(struct task_struct *new);
 extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
 
-#define NO_BOOST 0
-#define FULL_THROTTLE_BOOST 1
-#define CONSERVATIVE_BOOST 2
-#define RESTRAINED_BOOST 3
-
 static inline int cpu_capacity(int cpu)
 {
 	return cpu_rq(cpu)->cluster->capacity;
@@ -2789,6 +2792,11 @@
 
 static inline void clear_walt_request(int cpu) { }
 
+static inline int is_reserved(int cpu)
+{
+	return 0;
+}
+
 static inline int got_boost_kick(void)
 {
 	return 0;
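The !CONFIG_SCHED_WALT half of sched.h keeps accumulating fixed-value stand-ins (sched_ravg_window as TICK_NSEC, sysctl_sched_use_walt_cpu_util as 0, and now is_reserved() returning 0) so shared code can reference them unguarded. Because they are compile-time constants, the WALT-only paths disappear entirely; a sketch of the idiom (walt_cpu_util_estimate() is hypothetical, named only for illustration):

	if (sysctl_sched_use_walt_cpu_util)		/* constant 0 here */
		util = walt_cpu_util_estimate(cpu);	/* dead, discarded */
	else
		util = cpu_util(cpu);

	if (is_reserved(cpu))				/* constant 0 here */
		continue;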
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index bdcd174..192e8c7 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -753,6 +753,10 @@
 		sync_cgroup_colocation(task, colocate);
 
 }
+#else
+static void schedtune_attach(struct cgroup_taskset *tset)
+{
+}
 #endif
 
 static int
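This empty schedtune_attach() fills in the #else branch (the non-colocation configuration, judging by sync_cgroup_colocation() above) so the cgroup subsystem definition can install the callback unconditionally, presumably along these lines elsewhere in tune.c (a sketch, not shown in this hunk):

	struct cgroup_subsys schedtune_cgrp_subsys = {
		.css_alloc	= schedtune_css_alloc,
		.css_free	= schedtune_css_free,
		.attach		= schedtune_attach,
		/* ... remaining callbacks ... */
	};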
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index b4d815c..33daa99 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -2193,7 +2193,6 @@
 
 int __read_mostly min_power_cpu;
 
-unsigned int sched_smp_overlap_capacity;
 void walt_sched_energy_populated_callback(void)
 {
 	struct sched_cluster *cluster;
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 414c4ae..be06e7d4 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -181,7 +181,6 @@
 {
 	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
 }
-#define walt_cpu_high_irqload(cpu) sched_cpu_high_irqload(cpu)
 
 static inline int exiting_task(struct task_struct *p)
 {
@@ -378,6 +377,12 @@
 	return prev_cpu;
 }
 
+static inline u64 sched_irqload(int cpu)
+{
+	return 0;
+}
 #endif /* CONFIG_SCHED_WALT */
 
+#define walt_cpu_high_irqload(cpu) sched_cpu_high_irqload(cpu)
+
 #endif
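walt_cpu_high_irqload() is now defined once, after both configuration branches, rather than only inside the WALT one. Paired with the new !WALT sched_irqload() stub, the macro expands sensibly in both builds, assuming sched_cpu_high_irqload() and sysctl_sched_cpu_high_irqload stay visible in !WALT configurations, which this hunk does not show:

	/* CONFIG_SCHED_WALT: genuine IRQ-load threshold check */
	sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload

	/*
	 * !CONFIG_SCHED_WALT: the stub returns 0, so the comparison is
	 * 0 >= sysctl_sched_cpu_high_irqload, false for any non-zero
	 * threshold; the CPU never counts as IRQ-loaded.
	 */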