sched: avoid migrating when softint on tgt cpu should be short
The earlier scheduling change (bug 31501544) that avoids placing RT threads
on cores handling softints was also catching cases where there was no
reason to believe the softint would take a long time, resulting in
unnecessary migration overhead. This patch restricts the migration to cases
where the core is handling a softint that is actually likely to take a long
time (NET_TX, NET_RX, BLOCK, IRQ_POLL, TASKLET), as opposed to the RCU,
SCHED, and TIMER softints, which are typically quick.
Bug: 31752786
Change-Id: Ib4e179f1e15c736b2fdba31070494e357e9fbbe2
Git-commit: ce05770bd37b8065b61ef650108ecef2b97b148b
Git-repo: https://android.googlesource.com/kernel/msm
[pkondeti@codeaurora.org: resolved minor merge conflicts]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
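
As a rough illustration of the new policy (not part of the patch itself), the
following hypothetical self-check shows which softirq combinations still count
as "long" under LONG_SOFTIRQ_MASK from the interrupt.h hunk below; the function
name is made up for this sketch.

/* Illustrative self-check, not part of this patch. */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/bug.h>

static void __init long_softirq_mask_example(void)
{
        /* Quick housekeeping softirqs: no longer a reason to migrate. */
        __u32 quick = (1 << TIMER_SOFTIRQ) | (1 << SCHED_SOFTIRQ) |
                      (1 << RCU_SOFTIRQ);
        /* Potentially long softirqs: RT placement still avoids these. */
        __u32 slow = (1 << NET_RX_SOFTIRQ) | (1 << BLOCK_SOFTIRQ);

        WARN_ON(quick & LONG_SOFTIRQ_MASK);     /* none of these are long */
        WARN_ON(!(slow & LONG_SOFTIRQ_MASK));   /* these still trigger avoidance */
}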
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index eeceac3..15e6882 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -478,6 +478,12 @@
};
#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirqs where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)   | \
+                           (1 << NET_RX_SOFTIRQ)   | \
+                           (1 << BLOCK_SOFTIRQ)    | \
+                           (1 << IRQ_POLL_SOFTIRQ) | \
+                           (1 << TASKLET_SOFTIRQ))
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
@@ -513,6 +519,7 @@
extern void raise_softirq(unsigned int nr);
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
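
As an aside, the mask and per-cpu declaration above could be inspected for
debugging roughly as follows; this helper is illustrative only and is not
added anywhere by the patch.

/*
 * Debug sketch, illustrative only: name the softirqs @cpu is handling
 * right now, flagging the ones LONG_SOFTIRQ_MASK treats as long.
 * Assumes <linux/interrupt.h>, <linux/bitops.h> and <linux/printk.h>.
 */
static void report_active_softirqs(int cpu)
{
        unsigned long active = per_cpu(active_softirqs, cpu);
        unsigned int nr;

        for_each_set_bit(nr, &active, NR_SOFTIRQS)
                pr_info("cpu%d: handling %s softirq%s\n", cpu,
                        softirq_to_name[nr],
                        (BIT(nr) & LONG_SOFTIRQ_MASK) ? " (long)" : "");
}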
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a6bdd1f..42baee1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1467,16 +1467,20 @@
/*
* Return whether the task on the given cpu is currently non-preemptible
- * while handling a softirq or is likely to block preemptions soon because
- * it is a ksoftirq thread.
+ * while handling a potentially long softint, or is likely to block
+ * preemptions soon because it is a ksoftirq thread that is handling
+ * slow softints.
*/
bool
task_may_not_preempt(struct task_struct *task, int cpu)
{
+        __u32 softirqs = per_cpu(active_softirqs, cpu) |
+                         __IRQ_STAT(cpu, __softirq_pending);
struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
- return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
- task == cpu_ksoftirqd;
+        return ((softirqs & LONG_SOFTIRQ_MASK) &&
+                (task == cpu_ksoftirqd ||
+                 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
}
static int
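
For context, the consumer of this predicate is the RT task-placement code
added by the earlier change for bug 31501544 (presumably select_task_rq_rt()
in this tree), which this diff does not touch. A minimal caller sketch, with a
hypothetical function name:

/* Caller sketch, illustrative only; not part of this diff. */
static int pick_cpu_avoiding_long_softirqs(struct task_struct *p, int cpu)
{
        struct task_struct *curr = READ_ONCE(cpu_rq(cpu)->curr);

        /*
         * After this patch, a target CPU whose current task is only in
         * quick TIMER/SCHED/RCU softirq work is no longer rejected here,
         * so the RT task stays put instead of migrating.
         */
        if (task_may_not_preempt(curr, cpu)) {
                int lowest = find_lowest_rq(p);

                if (lowest != -1)
                        return lowest;  /* prefer a CPU free of long softirqs */
        }

        return cpu;     /* target CPU is acceptable */
}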
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 6f58486..5341647 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -57,6 +57,13 @@
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * with the expectation that approximate answers are acceptable and therefore
+ * no synchronization is used.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
+
const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -272,6 +279,7 @@
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
+ __this_cpu_write(active_softirqs, pending);
local_irq_enable();
@@ -301,6 +309,7 @@
pending >>= softirq_bit;
}
+ __this_cpu_write(active_softirqs, 0);
rcu_bh_qs();
local_irq_disable();
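
One design note, spelled out with an assumed read-side helper (not part of
the patch): __do_softirq() clears the pending bitmask before handling (the
set_softirq_pending(0) above), so a softirq currently being handled is
visible only through active_softirqs, while one raised but not yet handled is
visible only through __softirq_pending; a remote reader therefore ORs the
two, exactly as the rt.c hunk does. A stale, unsynchronized read at worst
causes a slightly suboptimal RT placement, never a correctness problem, which
is why the per-cpu writes above need no locking or barriers.

/* Read-side sketch, illustrative only; mirrors the rt.c hunk above. */
static inline bool cpu_busy_with_long_softirq(int cpu)
{
        __u32 softirqs = per_cpu(active_softirqs, cpu) |
                         __IRQ_STAT(cpu, __softirq_pending);

        return !!(softirqs & LONG_SOFTIRQ_MASK);
}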