Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
"Misc fixes all around the map: an instrumentation fix, a nohz
usability fix, a lockdep annotation fix and two task group scheduling
fixes"
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/core: Add missing lockdep_unpin() annotations
sched/deadline: Fix migration of SCHED_DEADLINE tasks
nohz: Revert "nohz: Set isolcpus when nohz_full is set"
sched/fair: Update task group's load_avg after task migration
sched/fair: Fix overly small weight for interactive group entities
sched, tracing: Stop/start critical timings around the idle=poll idle loop
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 10a8faa..bcd214e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2366,8 +2366,15 @@
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
- if (p->sched_class->task_woken)
+ if (p->sched_class->task_woken) {
+ /*
+ * Nothing relies on rq->lock after this, so it's fine to
+ * drop it.
+ */
+ lockdep_unpin_lock(&rq->lock);
p->sched_class->task_woken(rq, p);
+ lockdep_pin_lock(&rq->lock);
+ }
#endif
task_rq_unlock(rq, p, &flags);
}
@@ -7238,9 +7245,6 @@
alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
- /* nohz_full won't take effect without isolating the cpus. */
- tick_nohz_full_add_cpus_to(cpu_isolated_map);
-
sched_init_numa();
/*
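
The core.c hunk above brackets the ->task_woken() callback with
lockdep_unpin_lock()/lockdep_pin_lock(), because that callback may
legitimately drop and re-take rq->lock while the caller still has the lock
pinned. A rough userspace analogue of the pin/unpin idea, with made-up
names (pinned_lock, pl_*) rather than the kernel API:

/*
 * Hypothetical userspace sketch of a "pinned" lock: a pinned lock must not
 * be released until it is unpinned, so a callback that legitimately drops
 * the lock (as ->task_woken() may) has to be bracketed by unpin/pin.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct pinned_lock {
	pthread_mutex_t mutex;
	bool pinned;		/* debug-only state, like lockdep's pin */
};

static void pl_lock(struct pinned_lock *pl)   { pthread_mutex_lock(&pl->mutex); }
static void pl_unlock(struct pinned_lock *pl) { assert(!pl->pinned); pthread_mutex_unlock(&pl->mutex); }
static void pl_pin(struct pinned_lock *pl)    { pl->pinned = true; }
static void pl_unpin(struct pinned_lock *pl)  { pl->pinned = false; }

/* A callback that, like ->task_woken(), may drop and re-take the lock. */
static void may_drop_lock(struct pinned_lock *pl)
{
	pl_unlock(pl);
	/* ... work done without the lock held ... */
	pl_lock(pl);
}

int main(void)
{
	struct pinned_lock pl = { PTHREAD_MUTEX_INITIALIZER, false };

	pl_lock(&pl);
	pl_pin(&pl);		/* caller expects to still hold the lock later */

	pl_unpin(&pl);		/* analogue of lockdep_unpin_lock(&rq->lock) */
	may_drop_lock(&pl);
	pl_pin(&pl);		/* analogue of lockdep_pin_lock(&rq->lock) */

	pl_unpin(&pl);
	pl_unlock(&pl);
	return 0;
}
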
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fc8f010..8b0a15e 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -668,8 +668,15 @@
* Queueing this task back might have overloaded rq, check if we need
* to kick someone away.
*/
- if (has_pushable_dl_tasks(rq))
+ if (has_pushable_dl_tasks(rq)) {
+ /*
+ * Nothing relies on rq->lock after this, so it's safe to drop
+ * rq->lock.
+ */
+ lockdep_unpin_lock(&rq->lock);
push_dl_task(rq);
+ lockdep_pin_lock(&rq->lock);
+ }
#endif
unlock:
@@ -1066,8 +1073,9 @@
int target = find_later_rq(p);
if (target != -1 &&
- dl_time_before(p->dl.deadline,
- cpu_rq(target)->dl.earliest_dl.curr))
+ (dl_time_before(p->dl.deadline,
+ cpu_rq(target)->dl.earliest_dl.curr) ||
+ (cpu_rq(target)->dl.dl_nr_running == 0)))
cpu = target;
}
rcu_read_unlock();
@@ -1417,7 +1425,8 @@
later_rq = cpu_rq(cpu);
- if (!dl_time_before(task->dl.deadline,
+ if (later_rq->dl.dl_nr_running &&
+ !dl_time_before(task->dl.deadline,
later_rq->dl.earliest_dl.curr)) {
/*
* Target rq has tasks of equal or earlier deadline,
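
The deadline.c changes above let an otherwise idle runqueue be chosen as a
migration target: when dl_nr_running is zero, earliest_dl.curr is stale and
must not be compared against. A minimal standalone sketch of the resulting
check, with dl_time_before() modelled as the kernel's wraparound-safe
comparison; struct and helper names here are illustrative, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

struct dl_rq_state {
	unsigned int dl_nr_running;	/* number of queued deadline tasks */
	uint64_t earliest_dl_curr;	/* valid only if dl_nr_running > 0 */
};

static bool suitable_target(const struct dl_rq_state *rq, uint64_t task_deadline)
{
	/* Empty rq: always fine.  Otherwise the task needs the earlier deadline. */
	return rq->dl_nr_running == 0 ||
	       dl_time_before(task_deadline, rq->earliest_dl_curr);
}

int main(void)
{
	struct dl_rq_state idle_rq = { 0, 0 };
	struct dl_rq_state busy_rq = { 1, 1000 };

	printf("idle rq ok: %d\n", suitable_target(&idle_rq, 5000));	/* 1 */
	printf("busy rq ok: %d\n", suitable_target(&busy_rq, 5000));	/* 0 */
	return 0;
}
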
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e2e348..9a5e60f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2363,7 +2363,7 @@
*/
tg_weight = atomic_long_read(&tg->load_avg);
tg_weight -= cfs_rq->tg_load_avg_contrib;
- tg_weight += cfs_rq_load_avg(cfs_rq);
+ tg_weight += cfs_rq->load.weight;
return tg_weight;
}
@@ -2373,7 +2373,7 @@
long tg_weight, load, shares;
tg_weight = calc_tg_weight(tg, cfs_rq);
- load = cfs_rq_load_avg(cfs_rq);
+ load = cfs_rq->load.weight;
shares = (tg->shares * load);
if (tg_weight)
@@ -2664,13 +2664,14 @@
/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
- int decayed;
struct sched_avg *sa = &cfs_rq->avg;
+ int decayed, removed = 0;
if (atomic_long_read(&cfs_rq->removed_load_avg)) {
long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
sa->load_avg = max_t(long, sa->load_avg - r, 0);
sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+ removed = 1;
}
if (atomic_long_read(&cfs_rq->removed_util_avg)) {
@@ -2688,7 +2689,7 @@
cfs_rq->load_last_update_time_copy = sa->last_update_time;
#endif
- return decayed;
+ return decayed || removed;
}
/* Update task and its cfs_rq load average */
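
The fair.c hunks above switch calc_tg_weight()/calc_cfs_shares() from the
decayed cfs_rq_load_avg() back to the instantaneous cfs_rq->load.weight: a
mostly-sleeping (interactive) group has a tiny load_avg, so its group entity
ended up with an overly small weight. A simplified standalone sketch of the
shares arithmetic (the clamping to MIN_SHARES/tg->shares is omitted and all
numbers are made up for illustration):

#include <stdio.h>

/*
 * shares = tg_shares * my_load / tg_weight, where tg_weight is the
 * group-wide load with this cfs_rq's old contribution replaced by my_load.
 */
static long calc_shares(long tg_shares, long tg_load_sum,
			long my_old_contrib, long my_load)
{
	long tg_weight = tg_load_sum - my_old_contrib + my_load;
	long shares = tg_shares * my_load;

	if (tg_weight)
		shares /= tg_weight;
	return shares;
}

int main(void)
{
	/* Group runnable weight 1024 on this CPU, but decayed avg only 10. */
	long with_load_avg = calc_shares(1024, 2048, 100, 10);
	long with_weight   = calc_shares(1024, 2048, 100, 1024);

	printf("shares using load_avg:    %ld\n", with_load_avg);	/* 5 */
	printf("shares using load.weight: %ld\n", with_weight);		/* 352 */
	return 0;
}
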
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8f177c7..4a2ef5a 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -57,9 +57,11 @@
rcu_idle_enter();
trace_cpu_idle_rcuidle(0, smp_processor_id());
local_irq_enable();
+ stop_critical_timings();
while (!tif_need_resched() &&
(cpu_idle_force_poll || tick_check_broadcast_expired()))
cpu_relax();
+ start_critical_timings();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
rcu_idle_exit();
return 1;