net_sched: cleanups

Clean up net/sched code to match current CodingStyle and practices.

Reduce inline abuse.
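
For reference, the conventions applied below are: the opening brace of a
struct on the same line as the tag, the '*' of a pointer parameter attached
to the name rather than the type, spaces around binary operators, case
labels at the same indentation as their switch, and (in some spots)
assignments hoisted out of conditionals so that '!ptr' tests read cleanly.
A minimal, self-contained sketch of the before/after style, using
hypothetical foo_* names not taken from this patch:

#include <stdio.h>

struct foo_stats {			/* brace on the same line as the tag */
	unsigned int	drops;
	unsigned int	marks;
};

/* '*' binds to the parameter name ("*st"), not to the type */
static unsigned int foo_account(struct foo_stats *st, int action)
{
	unsigned int total = 0;		/* spaces around assignment operators */

	if (!st)			/* '!st' rather than 'st == NULL' */
		return 0;

	switch (action) {		/* case labels at the switch's level */
	case 0:
		st->drops++;
		break;
	case 1:
		st->marks++;
		break;
	}

	total += st->drops + st->marks;
	return total;
}

int main(void)
{
	struct foo_stats st = { 0, 0 };

	foo_account(&st, 1);
	printf("drops=%u marks=%u\n", st.drops, st.marks);
	return 0;
}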

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 51dcc2a..b9493a0 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -32,8 +32,7 @@
 struct gred_sched_data;
 struct gred_sched;
 
-struct gred_sched_data
-{
+struct gred_sched_data {
 	u32		limit;		/* HARD maximal queue length	*/
 	u32      	DP;		/* the drop parameters */
 	u32		bytesin;	/* bytes seen on virtualQ so far */
@@ -50,8 +49,7 @@
 	GRED_RIO_MODE,
 };
 
-struct gred_sched
-{
+struct gred_sched {
 	struct gred_sched_data *tab[MAX_DPs];
 	unsigned long	flags;
 	u32		red_flags;
@@ -150,17 +148,18 @@
 	return t->red_flags & TC_RED_HARDDROP;
 }
 
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	struct gred_sched_data *q=NULL;
-	struct gred_sched *t= qdisc_priv(sch);
+	struct gred_sched_data *q = NULL;
+	struct gred_sched *t = qdisc_priv(sch);
 	unsigned long qavg = 0;
 	u16 dp = tc_index_to_dp(skb);
 
-	if (dp >= t->DPs  || (q = t->tab[dp]) == NULL) {
+	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
 		dp = t->def;
 
-		if ((q = t->tab[dp]) == NULL) {
+		q = t->tab[dp];
+		if (!q) {
 			/* Pass through packets not assigned to a DP
 			 * if no default DP has been configured. This
 			 * allows for DP flows to be left untouched.
@@ -183,7 +182,7 @@
 		for (i = 0; i < t->DPs; i++) {
 			if (t->tab[i] && t->tab[i]->prio < q->prio &&
 			    !red_is_idling(&t->tab[i]->parms))
-				qavg +=t->tab[i]->parms.qavg;
+				qavg += t->tab[i]->parms.qavg;
 		}
 
 	}
@@ -203,28 +202,28 @@
 		gred_store_wred_set(t, q);
 
 	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
-		case RED_DONT_MARK:
-			break;
+	case RED_DONT_MARK:
+		break;
 
-		case RED_PROB_MARK:
-			sch->qstats.overlimits++;
-			if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
-				q->stats.prob_drop++;
-				goto congestion_drop;
-			}
+	case RED_PROB_MARK:
+		sch->qstats.overlimits++;
+		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+			q->stats.prob_drop++;
+			goto congestion_drop;
+		}
 
-			q->stats.prob_mark++;
-			break;
+		q->stats.prob_mark++;
+		break;
 
-		case RED_HARD_MARK:
-			sch->qstats.overlimits++;
-			if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
-			    !INET_ECN_set_ce(skb)) {
-				q->stats.forced_drop++;
-				goto congestion_drop;
-			}
-			q->stats.forced_mark++;
-			break;
+	case RED_HARD_MARK:
+		sch->qstats.overlimits++;
+		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+		    !INET_ECN_set_ce(skb)) {
+			q->stats.forced_drop++;
+			goto congestion_drop;
+		}
+		q->stats.forced_mark++;
+		break;
 	}
 
 	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
@@ -241,7 +240,7 @@
 	return NET_XMIT_CN;
 }
 
-static struct sk_buff *gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc *sch)
 {
 	struct sk_buff *skb;
 	struct gred_sched *t = qdisc_priv(sch);
@@ -254,9 +253,9 @@
 
 		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
 			if (net_ratelimit())
-				printk(KERN_WARNING "GRED: Unable to relocate "
-				       "VQ 0x%x after dequeue, screwing up "
-				       "backlog.\n", tc_index_to_dp(skb));
+				pr_warning("GRED: Unable to relocate VQ 0x%x "
+					   "after dequeue, screwing up "
+					   "backlog.\n", tc_index_to_dp(skb));
 		} else {
 			q->backlog -= qdisc_pkt_len(skb);
 
@@ -273,7 +272,7 @@
 	return NULL;
 }
 
-static unsigned int gred_drop(struct Qdisc* sch)
+static unsigned int gred_drop(struct Qdisc *sch)
 {
 	struct sk_buff *skb;
 	struct gred_sched *t = qdisc_priv(sch);
@@ -286,9 +285,9 @@
 
 		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
 			if (net_ratelimit())
-				printk(KERN_WARNING "GRED: Unable to relocate "
-				       "VQ 0x%x while dropping, screwing up "
-				       "backlog.\n", tc_index_to_dp(skb));
+				pr_warning("GRED: Unable to relocate VQ 0x%x "
+					   "while dropping, screwing up "
+					   "backlog.\n", tc_index_to_dp(skb));
 		} else {
 			q->backlog -= len;
 			q->stats.other++;
@@ -308,7 +307,7 @@
 
 }
 
-static void gred_reset(struct Qdisc* sch)
+static void gred_reset(struct Qdisc *sch)
 {
 	int i;
 	struct gred_sched *t = qdisc_priv(sch);
@@ -369,8 +368,8 @@
 
 	for (i = table->DPs; i < MAX_DPs; i++) {
 		if (table->tab[i]) {
-			printk(KERN_WARNING "GRED: Warning: Destroying "
-			       "shadowed VQ 0x%x\n", i);
+			pr_warning("GRED: Warning: Destroying "
+				   "shadowed VQ 0x%x\n", i);
 			gred_destroy_vq(table->tab[i]);
 			table->tab[i] = NULL;
 		}
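
(For context on the pr_warning() conversions above: in kernels of this
vintage, pr_warning() is, to the best of my knowledge, just a printf-style
convenience macro along the lines of

	#define pr_warning(fmt, ...) \
		printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

so the message text and log level are unchanged; only the call sites get
shorter.)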