netdev: Move _xmit_lock and xmit_lock_owner into netdev_queue.

Accesses are mostly structured so that the code transformations will be
a little simpler once there are multiple TX queues.

Signed-off-by: David S. Miller <davem@davemloft.net>
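
The include/linux/netdevice.h side of this change is not shown in the excerpt
below. A rough sketch of what it amounts to follows; the field placement
matches the hunks (txq->lock, txq->qdisc, txq->dev, txq->_xmit_lock,
txq->xmit_lock_owner), but the helper names here are illustrative only, not
necessarily the exact ones in the header:

	struct netdev_queue {
		spinlock_t		lock;
		struct net_device	*dev;
		struct Qdisc		*qdisc;
		/* moved here from struct net_device: */
		spinlock_t		_xmit_lock;
		int			xmit_lock_owner;	/* CPU holding _xmit_lock, -1 if unlocked */
	};

	/* Illustrative per-queue lock helpers.  HARD_TX_LOCK()/HARD_TX_UNLOCK()
	 * now take the struct netdev_queue as well (see the net/core/dev.c and
	 * net/sched/sch_generic.c hunks) and, presumably as before, only take
	 * the lock for drivers that do not do their own TX locking. */
	static inline void netdev_queue_xmit_lock(struct netdev_queue *txq, int cpu)
	{
		spin_lock(&txq->_xmit_lock);
		txq->xmit_lock_owner = cpu;
	}

	static inline void netdev_queue_xmit_unlock(struct netdev_queue *txq)
	{
		txq->xmit_lock_owner = -1;
		spin_unlock(&txq->_xmit_lock);
	}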
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b6e52c0..8efa399 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -627,6 +627,18 @@
  */
 static struct lock_class_key vlan_netdev_xmit_lock_key;
 
+static void vlan_dev_set_lockdep_one(struct netdev_queue *txq,
+				     int subclass)
+{
+	lockdep_set_class_and_subclass(&txq->_xmit_lock,
+				       &vlan_netdev_xmit_lock_key, subclass);
+}
+
+static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+{
+	vlan_dev_set_lockdep_one(&dev->tx_queue, subclass);
+}
+
 static const struct header_ops vlan_header_ops = {
 	.create	 = vlan_dev_hard_header,
 	.rebuild = vlan_dev_rebuild_header,
@@ -668,8 +680,7 @@
 	if (is_vlan_dev(real_dev))
 		subclass = 1;
 
-	lockdep_set_class_and_subclass(&dev->_xmit_lock,
-				&vlan_netdev_xmit_lock_key, subclass);
+	vlan_dev_set_lockdep_class(dev, subclass);
 	return 0;
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 0218b0b..a29a359 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -258,7 +258,7 @@
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 /*
- * register_netdevice() inits dev->_xmit_lock and sets lockdep class
+ * register_netdevice() inits txq->_xmit_lock and sets lockdep class
  * according to dev->type
  */
 static const unsigned short netdev_lock_type[] =
@@ -1758,19 +1758,19 @@
 	if (dev->flags & IFF_UP) {
 		int cpu = smp_processor_id(); /* ok because BHs are off */
 
-		if (dev->xmit_lock_owner != cpu) {
+		if (txq->xmit_lock_owner != cpu) {
 
-			HARD_TX_LOCK(dev, cpu);
+			HARD_TX_LOCK(dev, txq, cpu);
 
 			if (!netif_queue_stopped(dev) &&
 			    !netif_subqueue_stopped(dev, skb)) {
 				rc = 0;
 				if (!dev_hard_start_xmit(skb, dev)) {
-					HARD_TX_UNLOCK(dev);
+					HARD_TX_UNLOCK(dev, txq);
 					goto out;
 				}
 			}
-			HARD_TX_UNLOCK(dev);
+			HARD_TX_UNLOCK(dev, txq);
 			if (net_ratelimit())
 				printk(KERN_CRIT "Virtual device %s asks to "
 				       "queue packet!\n", dev->name);
@@ -3761,6 +3761,20 @@
 	dev_put(dev);
 }
 
+static void __netdev_init_queue_locks_one(struct netdev_queue *dev_queue,
+					  struct net_device *dev)
+{
+	spin_lock_init(&dev_queue->_xmit_lock);
+	netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+	dev_queue->xmit_lock_owner = -1;
+}
+
+static void netdev_init_queue_locks(struct net_device *dev)
+{
+	__netdev_init_queue_locks_one(&dev->tx_queue, dev);
+	__netdev_init_queue_locks_one(&dev->rx_queue, dev);
+}
+
 /**
  *	register_netdevice	- register a network device
  *	@dev: device to register
@@ -3795,9 +3809,7 @@
 	BUG_ON(!dev_net(dev));
 	net = dev_net(dev);
 
-	spin_lock_init(&dev->_xmit_lock);
-	netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
-	dev->xmit_lock_owner = -1;
+	netdev_init_queue_locks(dev);
 
 	dev->iflink = -1;
 
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 74884f4..819afc4 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -74,6 +74,16 @@
  */
 static struct lock_class_key nr_netdev_xmit_lock_key;
 
+static void nr_set_lockdep_one(struct netdev_queue *txq)
+{
+	lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
+}
+
+static void nr_set_lockdep_key(struct net_device *dev)
+{
+	nr_set_lockdep_one(&dev->tx_queue);
+}
+
 /*
  *	Socket removal during an interrupt is now safe.
  */
@@ -1430,7 +1440,7 @@
 			free_netdev(dev);
 			goto fail;
 		}
-		lockdep_set_class(&dev->_xmit_lock, &nr_netdev_xmit_lock_key);
+		nr_set_lockdep_key(dev);
 		dev_nr[i] = dev;
 	}
 
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 46461a6..7dbbc08 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -75,6 +75,16 @@
  */
 static struct lock_class_key rose_netdev_xmit_lock_key;
 
+static void rose_set_lockdep_one(struct netdev_queue *txq)
+{
+	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
+}
+
+static void rose_set_lockdep_key(struct net_device *dev)
+{
+	rose_set_lockdep_one(&dev->tx_queue);
+}
+
 /*
  *	Convert a ROSE address into text.
  */
@@ -1576,7 +1586,7 @@
 			free_netdev(dev);
 			goto fail;
 		}
-		lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key);
+		rose_set_lockdep_key(dev);
 		dev_rose[i] = dev;
 	}
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fcc7533..b6a36d3 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -92,10 +92,9 @@
 					   struct netdev_queue *dev_queue,
 					   struct Qdisc *q)
 {
-	struct net_device *dev = dev_queue->dev;
 	int ret;
 
-	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
+	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
 		/*
 		 * Same CPU holding the lock. It may be a transient
 		 * configuration error, when hard_start_xmit() recurses. We
@@ -105,7 +104,7 @@
 		kfree_skb(skb);
 		if (net_ratelimit())
 			printk(KERN_WARNING "Dead loop on netdevice %s, "
-			       "fix it urgently!\n", dev->name);
+			       "fix it urgently!\n", dev_queue->dev->name);
 		ret = qdisc_qlen(q);
 	} else {
 		/*
@@ -155,10 +154,10 @@
 
 	dev = txq->dev;
 
-	HARD_TX_LOCK(dev, smp_processor_id());
+	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
-	HARD_TX_UNLOCK(dev);
+	HARD_TX_UNLOCK(dev, txq);
 
 	spin_lock(&txq->lock);
 	q = txq->qdisc;