pkt_sched: Remove 'dev' member of struct Qdisc.

It can be obtained via the netdev_queue.  So create a helper routine,
qdisc_dev(), to make the transformations nicer looking.
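
The helper is just a one-line accessor over the netdev_queue
back-pointer; a minimal sketch of what lands in
include/net/sch_generic.h (that hunk is not shown below):

	static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
	{
		/* every Qdisc is attached to a netdev_queue, which
		 * carries a pointer to its owning device */
		return qdisc->dev_queue->dev;
	}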

As a result, qdisc_alloc() no longer needs a net_device pointer argument.
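The updated prototype, per the sch_generic.c hunk below:

	struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				  struct Qdisc_ops *ops);

Internally it takes its device reference via the queue, e.g.
dev_hold(qdisc_dev(sch)) instead of storing and holding 'dev' itself.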

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 9360fc8..e2389f1 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -334,7 +334,7 @@
 	tcm->tcm_family = AF_UNSPEC;
 	tcm->tcm__pad1 = 0;
 	tcm->tcm__pad2 = 0;
-	tcm->tcm_ifindex = tp->q->dev->ifindex;
+	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
 	tcm->tcm_parent = tp->classid;
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
 	NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 784dcb8..5a16ca2 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -302,7 +302,7 @@
 			*fp = f->next;
 			tcf_tree_unlock(tp);
 
-			route4_reset_fastmap(tp->q->dev, head, f->id);
+			route4_reset_fastmap(qdisc_dev(tp->q), head, f->id);
 			route4_delete_filter(tp, f);
 
 			/* Strip tree */
@@ -500,7 +500,7 @@
 	}
 	tcf_tree_unlock(tp);
 
-	route4_reset_fastmap(tp->q->dev, head, f->id);
+	route4_reset_fastmap(qdisc_dev(tp->q), head, f->id);
 	*arg = (unsigned long)f;
 	return 0;
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b86c98b..1f89308 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -281,7 +281,7 @@
 {
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 						 timer);
-	struct net_device *dev = wd->qdisc->dev;
+	struct net_device *dev = qdisc_dev(wd->qdisc);
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 	smp_wmb();
@@ -493,7 +493,7 @@
 		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
 			return;
 
-		sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
 		if (sch == NULL) {
 			WARN_ON(parentid != TC_H_ROOT);
 			return;
@@ -593,7 +593,7 @@
 	if (ops == NULL)
 		goto err_out;
 
-	sch = qdisc_alloc(dev, dev_queue, ops);
+	sch = qdisc_alloc(dev_queue, ops);
 	if (IS_ERR(sch)) {
 		err = PTR_ERR(sch);
 		goto err_out2;
@@ -940,7 +940,7 @@
 	tcm->tcm_family = AF_UNSPEC;
 	tcm->tcm__pad1 = 0;
 	tcm->tcm__pad2 = 0;
-	tcm->tcm_ifindex = q->dev->ifindex;
+	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
 	tcm->tcm_parent = clid;
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = atomic_read(&q->refcnt);
@@ -1186,7 +1186,7 @@
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
 	tcm = NLMSG_DATA(nlh);
 	tcm->tcm_family = AF_UNSPEC;
-	tcm->tcm_ifindex = q->dev->ifindex;
+	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
 	tcm->tcm_parent = q->handle;
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = 0;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 3dddab5..0de757e 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -296,7 +296,7 @@
 		goto err_out;
 	}
 	flow->filter_list = NULL;
-	flow->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	flow->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 				    &pfifo_qdisc_ops, classid);
 	if (!flow->q)
 		flow->q = &noop_qdisc;
@@ -556,7 +556,7 @@
 
 	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
 	p->flows = &p->link;
-	p->link.q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 				      &pfifo_qdisc_ops, sch->handle);
 	if (!p->link.q)
 		p->link.q = &noop_qdisc;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d360dcd..9f2ace5 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -650,7 +650,7 @@
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
-	netif_schedule(sch->dev);
+	netif_schedule(qdisc_dev(sch));
 	return HRTIMER_NORESTART;
 }
 
@@ -1077,9 +1077,9 @@
 				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
 					q->quanta[prio];
 			}
-			if (cl->quantum <= 0 || cl->quantum>32*cl->qdisc->dev->mtu) {
+			if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
 				printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
-				cl->quantum = cl->qdisc->dev->mtu/2 + 1;
+				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
 			}
 		}
 	}
@@ -1401,7 +1401,7 @@
 	q->link.sibling = &q->link;
 	q->link.common.classid = sch->handle;
 	q->link.qdisc = sch;
-	if (!(q->link.q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					    &pfifo_qdisc_ops,
 					    sch->handle)))
 		q->link.q = &noop_qdisc;
@@ -1411,7 +1411,7 @@
 	q->link.cpriority = TC_CBQ_MAXPRIO-1;
 	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
 	q->link.overlimit = cbq_ovl_classic;
-	q->link.allot = psched_mtu(sch->dev);
+	q->link.allot = psched_mtu(qdisc_dev(sch));
 	q->link.quantum = q->link.allot;
 	q->link.weight = q->link.R_tab->rate.rate;
 
@@ -1646,7 +1646,7 @@
 
 	if (cl) {
 		if (new == NULL) {
-			new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+			new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 						&pfifo_qdisc_ops,
 						cl->common.classid);
 			if (new == NULL)
@@ -1746,10 +1746,10 @@
 #ifdef CONFIG_NET_CLS_ACT
 		struct cbq_sched_data *q = qdisc_priv(sch);
 
-		spin_lock_bh(&sch->dev->queue_lock);
+		spin_lock_bh(&qdisc_dev(sch)->queue_lock);
 		if (q->rx_class == cl)
 			q->rx_class = NULL;
-		spin_unlock_bh(&sch->dev->queue_lock);
+		spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 #endif
 
 		cbq_destroy_class(sch, cl);
@@ -1828,7 +1828,7 @@
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &sch->dev->queue_lock,
+					      &qdisc_dev(sch)->queue_lock,
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1879,7 +1879,7 @@
 	cl->R_tab = rtab;
 	rtab = NULL;
 	cl->refcnt = 1;
-	if (!(cl->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					&pfifo_qdisc_ops, classid)))
 		cl->q = &noop_qdisc;
 	cl->common.classid = classid;
@@ -1919,7 +1919,7 @@
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &sch->dev->queue_lock, tca[TCA_RATE]);
+				  &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
 
 	*arg = (unsigned long)cl;
 	return 0;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index c955ba2..3aafbd1 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -60,7 +60,7 @@
 		sch, p, new, old);
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					&pfifo_qdisc_ops,
 					sch->handle);
 		if (new == NULL)
@@ -391,7 +391,7 @@
 	p->default_index = default_index;
 	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
 
-	p->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 				 &pfifo_qdisc_ops, sch->handle);
 	if (p->q == NULL)
 		p->q = &noop_qdisc;
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 779eae8..1d97fa4 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -48,10 +48,10 @@
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
 	if (opt == NULL) {
-		u32 limit = sch->dev->tx_queue_len ? : 1;
+		u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
 
 		if (sch->ops == &bfifo_qdisc_ops)
-			limit *= sch->dev->mtu;
+			limit *= qdisc_dev(sch)->mtu;
 
 		q->limit = limit;
 	} else {
@@ -137,7 +137,7 @@
 	struct Qdisc *q;
 	int err = -ENOMEM;
 
-	q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 			      ops, TC_H_MAKE(sch->handle, 1));
 	if (q) {
 		err = fifo_set_limit(q, limit);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d970864..b626a4f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -364,7 +364,7 @@
 {
 	struct sk_buff_head *list = prio2list(skb, qdisc);
 
-	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
+	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
 		qdisc->q.qlen++;
 		return __qdisc_enqueue_tail(skb, qdisc, list);
 	}
@@ -440,8 +440,7 @@
 	.owner		=	THIS_MODULE,
 };
 
-struct Qdisc *qdisc_alloc(struct net_device *dev,
-			  struct netdev_queue *dev_queue,
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  struct Qdisc_ops *ops)
 {
 	void *p;
@@ -465,8 +464,7 @@
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
-	sch->dev = dev;
-	dev_hold(dev);
+	dev_hold(qdisc_dev(sch));
 	atomic_set(&sch->refcnt, 1);
 
 	return sch;
@@ -481,7 +479,7 @@
 {
 	struct Qdisc *sch;
 
-	sch = qdisc_alloc(dev, dev_queue, ops);
+	sch = qdisc_alloc(dev_queue, ops);
 	if (IS_ERR(sch))
 		goto errout;
 	sch->stats_lock = &dev->queue_lock;
@@ -534,7 +532,7 @@
 		ops->destroy(qdisc);
 
 	module_put(ops->owner);
-	dev_put(qdisc->dev);
+	dev_put(qdisc_dev(qdisc));
 	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
 }
 EXPORT_SYMBOL(qdisc_destroy);
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index c89fba5..39fa285 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -164,7 +164,7 @@
 			 * if no default DP has been configured. This
 			 * allows for DP flows to be left untouched.
 			 */
-			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
+			if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
 				return qdisc_enqueue_tail(skb, sch);
 			else
 				goto drop;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5a22fec..3335254 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &sch->dev->queue_lock,
+					      &qdisc_dev(sch)->queue_lock,
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1083,7 +1083,7 @@
 	cl->refcnt    = 1;
 	cl->sched     = q;
 	cl->cl_parent = parent;
-	cl->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 				      &pfifo_qdisc_ops, classid);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
@@ -1104,7 +1104,7 @@
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &sch->dev->queue_lock, tca[TCA_RATE]);
+				  &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
 	*arg = (unsigned long)cl;
 	return 0;
 }
@@ -1202,7 +1202,7 @@
 	if (cl->level > 0)
 		return -EINVAL;
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					&pfifo_qdisc_ops,
 					cl->cl_common.classid);
 		if (new == NULL)
@@ -1445,7 +1445,7 @@
 	q->root.cl_common.classid = sch->handle;
 	q->root.refcnt  = 1;
 	q->root.sched   = q;
-	q->root.qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					  &pfifo_qdisc_ops,
 					  sch->handle);
 	if (q->root.qdisc == NULL)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 956a67f..31f7d15 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1026,7 +1026,7 @@
 	qdisc_watchdog_init(&q->watchdog, sch);
 	skb_queue_head_init(&q->direct_queue);
 
-	q->direct_qlen = sch->dev->tx_queue_len;
+	q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
 	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
 		q->direct_qlen = 2;
 
@@ -1043,7 +1043,7 @@
 	struct nlattr *nest;
 	struct tc_htb_glob gopt;
 
-	spin_lock_bh(&sch->dev->queue_lock);
+	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
 
 	gopt.direct_pkts = q->direct_pkts;
 	gopt.version = HTB_VER;
@@ -1057,11 +1057,11 @@
 	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
 	nla_nest_end(skb, nest);
 
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 	nla_nest_cancel(skb, nest);
 	return -1;
 }
@@ -1073,7 +1073,7 @@
 	struct nlattr *nest;
 	struct tc_htb_opt opt;
 
-	spin_lock_bh(&sch->dev->queue_lock);
+	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
 	tcm->tcm_handle = cl->common.classid;
 	if (!cl->level && cl->un.leaf.q)
@@ -1095,11 +1095,11 @@
 	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
 
 	nla_nest_end(skb, nest);
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 	nla_nest_cancel(skb, nest);
 	return -1;
 }
@@ -1129,7 +1129,7 @@
 
 	if (cl && !cl->level) {
 		if (new == NULL &&
-		    (new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+		    (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					     &pfifo_qdisc_ops,
 					     cl->common.classid))
 		    == NULL)
@@ -1257,7 +1257,7 @@
 		return -EBUSY;
 
 	if (!cl->level && htb_parent_last_child(cl)) {
-		new_q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					  &pfifo_qdisc_ops,
 					  cl->parent->common.classid);
 		last_child = 1;
@@ -1365,7 +1365,7 @@
 			goto failure;
 
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &sch->dev->queue_lock,
+				  &qdisc_dev(sch)->queue_lock,
 				  tca[TCA_RATE] ? : &est.nla);
 		cl->refcnt = 1;
 		cl->children = 0;
@@ -1378,7 +1378,7 @@
 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
 		   so that can't be used inside of sch_tree_lock
 		   -- thanks to Karlis Peisenieks */
-		new_q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					  &pfifo_qdisc_ops, classid);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
@@ -1420,7 +1420,7 @@
 	} else {
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &sch->dev->queue_lock,
+					      &qdisc_dev(sch)->queue_lock,
 					      tca[TCA_RATE]);
 		sch_tree_lock(sch);
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index aa7a04e..7905829 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -180,7 +180,7 @@
 	 * skb will be queued.
 	 */
 	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-		struct Qdisc *rootq = sch->dev->qdisc;
+		struct Qdisc *rootq = qdisc_dev(sch)->qdisc;
 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 		q->duplicate = 0;
 
@@ -333,9 +333,9 @@
 	for (i = 0; i < n; i++)
 		d->table[i] = data[i];
 
-	spin_lock_bh(&sch->dev->queue_lock);
+	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
 	d = xchg(&q->delay_dist, d);
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 
 	kfree(d);
 	return 0;
@@ -495,7 +495,7 @@
 
 		q->limit = ctl->limit;
 	} else
-		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
+		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
 
 	q->oldest = PSCHED_PASTPERFECT;
 	return 0;
@@ -536,7 +536,7 @@
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 
-	q->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 				     &tfifo_qdisc_ops,
 				     TC_H_MAKE(sch->handle, 1));
 	if (!q->qdisc) {
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index ca58a03..39157f7 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -136,7 +136,8 @@
 		 * pulling an skb.  This way we avoid excessive requeues
 		 * for slower queues.
 		 */
-		if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
+		if (!__netif_subqueue_stopped(qdisc_dev(sch),
+					      (q->mq ? prio : 0))) {
 			qdisc = q->queues[prio];
 			skb = qdisc->dequeue(qdisc);
 			if (skb) {
@@ -165,8 +166,8 @@
 		 * for slower queues.  If the queue is stopped, try the
 		 * next queue.
 		 */
-		if (!__netif_subqueue_stopped(sch->dev,
-					    (q->mq ? q->curband : 0))) {
+		if (!__netif_subqueue_stopped(qdisc_dev(sch),
+					      (q->mq ? q->curband : 0))) {
 			qdisc = q->queues[q->curband];
 			skb = qdisc->dequeue(qdisc);
 			if (skb) {
@@ -249,10 +250,10 @@
 	if (q->mq) {
 		if (sch->parent != TC_H_ROOT)
 			return -EINVAL;
-		if (netif_is_multiqueue(sch->dev)) {
+		if (netif_is_multiqueue(qdisc_dev(sch))) {
 			if (q->bands == 0)
-				q->bands = sch->dev->egress_subqueue_count;
-			else if (q->bands != sch->dev->egress_subqueue_count)
+				q->bands = qdisc_dev(sch)->egress_subqueue_count;
+			else if (q->bands != qdisc_dev(sch)->egress_subqueue_count)
 				return -EINVAL;
 		} else
 			return -EOPNOTSUPP;
@@ -281,7 +282,7 @@
 	for (i=0; i<q->bands; i++) {
 		if (q->queues[i] == &noop_qdisc) {
 			struct Qdisc *child;
-			child = qdisc_create_dflt(sch->dev, sch->dev_queue,
+			child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 						  &pfifo_qdisc_ops,
 						  TC_H_MAKE(sch->handle, i + 1));
 			if (child) {
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 6a97afb..8458f63 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -461,7 +461,7 @@
 		return -EINVAL;
 
 	sch_tree_lock(sch);
-	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
 	q->perturb_period = ctl->perturb_period * HZ;
 	if (ctl->limit)
 		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
@@ -502,7 +502,7 @@
 	q->max_depth = 0;
 	q->tail = SFQ_DEPTH;
 	if (opt == NULL) {
-		q->quantum = psched_mtu(sch->dev);
+		q->quantum = psched_mtu(qdisc_dev(sch));
 		q->perturb_period = 0;
 		q->perturbation = net_random();
 	} else {
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 0444fd0..b3fc826 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -78,7 +78,7 @@
 static int
 teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
-	struct net_device *dev = sch->dev;
+	struct net_device *dev = qdisc_dev(sch);
 	struct teql_sched_data *q = qdisc_priv(sch);
 
 	if (q->q.qlen < dev->tx_queue_len) {
@@ -111,7 +111,7 @@
 
 	skb = __skb_dequeue(&dat->q);
 	if (skb == NULL) {
-		struct net_device *m = dat->m->dev->qdisc->dev;
+		struct net_device *m = qdisc_dev(dat->m->dev->qdisc);
 		if (m) {
 			dat->m->slaves = sch;
 			netif_wake_queue(m);
@@ -170,7 +170,7 @@
 
 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 {
-	struct net_device *dev = sch->dev;
+	struct net_device *dev = qdisc_dev(sch);
 	struct teql_master *m = (struct teql_master*)sch->ops;
 	struct teql_sched_data *q = qdisc_priv(sch);
 
@@ -282,7 +282,7 @@
 		goto drop;
 
 	do {
-		struct net_device *slave = q->dev;
+		struct net_device *slave = qdisc_dev(q);
 
 		if (slave->qdisc_sleeping != q)
 			continue;
@@ -352,7 +352,7 @@
 
 	q = m->slaves;
 	do {
-		struct net_device *slave = q->dev;
+		struct net_device *slave = qdisc_dev(q);
 
 		if (slave == NULL)
 			return -EUNATCH;
@@ -403,7 +403,7 @@
 	q = m->slaves;
 	if (q) {
 		do {
-			if (new_mtu > q->dev->mtu)
+			if (new_mtu > qdisc_dev(q)->mtu)
 				return -EINVAL;
 		} while ((q=NEXT_SLAVE(q)) != m->slaves);
 	}