mv643xx_eth: transmit multiqueue support

As all the infrastructure for multiple transmit queues already exists
in the driver, this patch is entirely trivial.
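
For reference, a minimal sketch of the per-queue transmit pattern the
driver moves to (descriptor handling is elided, and
ring_is_nearly_full() is a hypothetical helper standing in for the
real ring-occupancy check):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int xmit_sketch(struct sk_buff *skb, struct net_device *dev)
    {
            /* the core has already chosen a subqueue for this skb */
            int queue = skb_get_queue_mapping(skb);
            struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

            /* ... hand the skb to the hardware ring for 'queue' ... */

            /* flow-control only this subqueue, not the whole device */
            if (ring_is_nearly_full(queue))
                    netif_tx_stop_queue(nq);

            return NETDEV_TX_OK;
    }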

The individual transmit queues are still serialised by the driver's
per-port private spinlock; a subsequent patch will replace that lock
with the per-subqueue ->_xmit_lock.
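
In outline, the locking then changes as sketched below;
__netif_tx_lock() is what the core uses to take a subqueue's
->_xmit_lock, and the sketch assumes a caller that already knows its
queue index:

    static void lock_sketch(struct mv643xx_eth_private *mp,
                            struct net_device *dev, int queue)
    {
            struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);
            unsigned long flags;

            /* today: one per-port lock serialises every subqueue */
            spin_lock_irqsave(&mp->lock, flags);
            /* ... per-queue transmit work ... */
            spin_unlock_irqrestore(&mp->lock, flags);

            /* later: each subqueue is covered by its own lock */
            __netif_tx_lock(nq, smp_processor_id());
            /* ... per-queue transmit work ... */
            __netif_tx_unlock(nq);
    }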

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
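
---

Two notes on the diff: alloc_etherdev_mq(sizeof(...), 8) sizes the
device for up to eight transmit subqueues (the most this hardware
supports), and dev->real_num_tx_queues = mp->txq_count then tells
the core how many of them are actually in use.  The tx timeout path
switches to the all-queues helpers, which in outline just walk every
subqueue:

    /* roughly what netif_tx_stop_all_queues() expands to */
    static void stop_all_sketch(struct net_device *dev)
    {
            unsigned int i;

            for (i = 0; i < dev->num_tx_queues; i++)
                    netif_tx_stop_queue(netdev_get_tx_queue(dev, i));
    }
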
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index e592fac..1ceed87 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -449,15 +449,10 @@
 static void __txq_maybe_wake(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-
-	/*
-	 * netif_{stop,wake}_queue() flow control only applies to
-	 * the primary queue.
-	 */
-	BUG_ON(txq->index != 0);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
 	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
-		netif_wake_queue(mp->dev);
+		netif_tx_wake_queue(nq);
 }
 
 
@@ -827,8 +822,11 @@
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
+	int queue;
 	struct tx_queue *txq;
+	struct netdev_queue *nq;
 	unsigned long flags;
+	int entries_left;
 
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
 		stats->tx_dropped++;
@@ -838,15 +836,16 @@
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irqsave(&mp->lock, flags);
+	queue = skb_get_queue_mapping(skb);
+	txq = mp->txq + queue;
+	nq = netdev_get_tx_queue(dev, queue);
 
-	txq = mp->txq;
+	spin_lock_irqsave(&mp->lock, flags);
 
 	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
 		spin_unlock_irqrestore(&mp->lock, flags);
-		if (txq->index == 0 && net_ratelimit())
-			dev_printk(KERN_ERR, &dev->dev,
-				   "primary tx queue full?!\n");
+		if (net_ratelimit())
+			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -856,13 +855,9 @@
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
 
-	if (txq->index == 0) {
-		int entries_left;
-
-		entries_left = txq->tx_ring_size - txq->tx_desc_count;
-		if (entries_left < MAX_SKB_FRAGS + 1)
-			netif_stop_queue(dev);
-	}
+	entries_left = txq->tx_ring_size - txq->tx_desc_count;
+	if (entries_left < MAX_SKB_FRAGS + 1)
+		netif_tx_stop_queue(nq);
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 
@@ -2169,10 +2164,10 @@
 
 	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
 	if (netif_running(mp->dev)) {
-		netif_stop_queue(mp->dev);
+		netif_tx_stop_all_queues(mp->dev);
 		port_reset(mp);
 		port_start(mp);
-		netif_wake_queue(mp->dev);
+		netif_tx_wake_all_queues(mp->dev);
 	}
 }
 
@@ -2546,7 +2541,7 @@
 		return -ENODEV;
 	}
 
-	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
+	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
 	if (!dev)
 		return -ENOMEM;
 
@@ -2559,6 +2554,7 @@
 	mp->dev = dev;
 
 	set_params(mp, pd);
+	dev->real_num_tx_queues = mp->txq_count;
 
 	spin_lock_init(&mp->lock);