net: sched: Schedule PRIO qdisc when flow control released

The PRIO qdisc supports flow control: packet dequeue can be
disabled based on the boolean flag 'enable_flow'.  When flow is
re-enabled, packets see high latency before reaching the network
driver, because nothing prompts the qdisc to dequeue until the
next enqueue or driver event.  To reduce this delay, have the
qdisc invoke __netif_schedule() when flow transitions from
disabled to enabled, expediting dequeue and significantly
reducing the latency of packets arriving at the network driver.
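
For reference, the dequeue path is assumed to gate on the same
flag along the lines of the sketch below (a minimal, hypothetical
rendering of prio_dequeue() honoring the vendor 'enable_flow'
field; the real dequeue code is not touched by this patch and may
differ in detail):

/* Sketch only: assumed shape of prio_dequeue() in
 * net/sched/sch_prio.c with the vendor enable_flow gate.  While
 * the flag is clear the qdisc hands out no packets, so once it is
 * set again nothing wakes the qdisc until the next enqueue; hence
 * the explicit __netif_schedule() added by this patch.
 */
static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (!q->enable_flow)	/* flow-controlled: hold all packets */
		return NULL;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc->dequeue(qdisc);

		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;
}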

Change-Id: Ic5fe3faf86f177300d3018b9f60974ba3811641c
CRs-Fixed: 355156
Acked-by: Jimi Shah <jimis@qualcomm.com>
Signed-off-by: Tianyi Gou <tgou@codeaurora.org>
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8129d97..5e5ad91 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -18,6 +18,7 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
@@ -176,6 +177,7 @@
 	struct prio_sched_data *q = qdisc_priv(sch);
 	struct tc_prio_qopt *qopt;
 	int i;
+	int flow_change = 0;
 
 	if (nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
@@ -190,7 +192,10 @@
 	}
 
 	sch_tree_lock(sch);
-	q->enable_flow = qopt->enable_flow;
+	if (q->enable_flow != qopt->enable_flow) {
+		q->enable_flow = qopt->enable_flow;
+		flow_change = 1;
+	}
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
@@ -225,6 +230,13 @@
 			}
 		}
 	}
+
+	/* Schedule the qdisc when flow is re-enabled */
+	if (flow_change && q->enable_flow) {
+		if (!test_bit(__QDISC_STATE_DEACTIVATED,
+			      &sch->state))
+			__netif_schedule(qdisc_root(sch));
+	}
 	return 0;
 }