gianfar: Support NAPI for TX Frames

Clean up completed TX frames in gfar_poll().  This prevents the TX
completion interrupt from interfering with the processing of received
frames.

We also disable hardware rx coalescing when NAPI is enabled.
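
For illustration, the cleanup loop that the new gfar_clean_tx_ring() performs
(walk the ring from the oldest dirty descriptor, stop at the first one the
hardware still owns via TXBD_READY, count the completed ones, and wrap at the
end of the ring) can be modelled with the small stand-alone program below.
The types, names and constants in it are simplified stand-ins, not the real
driver structures:

/*
 * Stand-alone model of the TX-ring cleanup done by gfar_clean_tx_ring():
 * reclaim descriptors the hardware has finished with, count them, and
 * advance the dirty index, wrapping at the end of the ring.
 */
#include <stdio.h>

#define RING_SIZE   8
#define TXBD_READY  0x8000	/* "still owned by hardware" in this model */

struct model_txbd {
	unsigned short status;
	unsigned short length;
};

/* Reclaim completed descriptors starting at *dirty; return how many. */
static int model_clean_tx_ring(struct model_txbd *ring, int *dirty)
{
	int howmany = 0;

	while (!(ring[*dirty].status & TXBD_READY)) {
		ring[*dirty].length = 0;	/* mark the slot empty */
		howmany++;
		*dirty = (*dirty + 1) % RING_SIZE;
		if (howmany == RING_SIZE)	/* whole ring reclaimed */
			break;
	}

	return howmany;
}

int main(void)
{
	struct model_txbd ring[RING_SIZE] = { { 0, 0 } };
	int dirty = 0;
	int n;

	/* Pretend descriptors 0-2 have completed and 3 is still in flight. */
	ring[3].status = TXBD_READY;

	n = model_clean_tx_ring(ring, &dirty);
	printf("reclaimed %d descriptors, dirty index now %d\n", n, dirty);

	return 0;
}

With this patch the same cleanup runs both from the TX completion interrupt
(under txlock) and from gfar_poll(), where a trylock is used so the poll path
simply skips TX cleanup if the interrupt handler already holds the lock.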

Signed-off-by: Dai Haruki <dai.haruki@freescale.com>
Signed-off-by: Andy Fleming <afleming@freescale.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 601f93e..c8c3df7 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1250,17 +1250,12 @@
 }
 
 /* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+int gfar_clean_tx_ring(struct net_device *dev)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
 	struct txbd8 *bdp;
+	struct gfar_private *priv = netdev_priv(dev);
+	int howmany = 0;
 
-	/* Clear IEVENT */
-	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
-
-	/* Lock priv */
-	spin_lock(&priv->txlock);
 	bdp = priv->dirty_tx;
 	while ((bdp->status & TXBD_READY) == 0) {
 		/* If dirty_tx and cur_tx are the same, then either the */
@@ -1269,7 +1264,7 @@
 		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
 			break;
 
-		dev->stats.tx_packets++;
+		howmany++;
 
 		/* Deferred means some collisions occurred during transmit, */
 		/* but we eventually sent the packet. */
@@ -1278,11 +1273,15 @@
 
 		/* Free the sk buffer associated with this TxBD */
 		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
+
 		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
 		priv->skb_dirtytx =
 		    (priv->skb_dirtytx +
 		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);
 
+		/* Clean BD length for empty detection */
+		bdp->length = 0;
+
 		/* update bdp to point at next bd in the ring (wrapping if necessary) */
 		if (bdp->status & TXBD_WRAP)
 			bdp = priv->tx_bd_base;
@@ -1297,6 +1296,25 @@
 			netif_wake_queue(dev);
 	} /* while ((bdp->status & TXBD_READY) == 0) */
 
+	dev->stats.tx_packets += howmany;
+
+	return howmany;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* Clear IEVENT */
+	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
+
+	/* Lock priv */
+	spin_lock(&priv->txlock);
+
+	gfar_clean_tx_ring(dev);
+
 	/* If we are coalescing the interrupts, reset the timer */
 	/* Otherwise, clear it */
 	if (likely(priv->txcoalescing)) {
@@ -1392,15 +1410,15 @@
 	unsigned long flags;
 #endif
 
-	/* Clear IEVENT, so rx interrupt isn't called again
-	 * because of this interrupt */
-	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
-
 	/* support NAPI */
 #ifdef CONFIG_GFAR_NAPI
+	/* Clear IEVENT, so interrupts aren't called again
+	 * because of the packets that have already arrived */
+	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+
 	if (netif_rx_schedule_prep(dev, &priv->napi)) {
 		tempval = gfar_read(&priv->regs->imask);
-		tempval &= IMASK_RX_DISABLED;
+		tempval &= IMASK_RTX_DISABLED;
 		gfar_write(&priv->regs->imask, tempval);
 
 		__netif_rx_schedule(dev, &priv->napi);
@@ -1411,6 +1429,9 @@
 				gfar_read(&priv->regs->imask));
 	}
 #else
+	/* Clear IEVENT, so rx interrupt isn't called again
+	 * because of this interrupt */
+	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
 
 	spin_lock_irqsave(&priv->rxlock, flags);
 	gfar_clean_rx_ring(dev, priv->rx_ring_size);
@@ -1580,6 +1601,13 @@
 	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
 	struct net_device *dev = priv->dev;
 	int howmany;
+	unsigned long flags;
+
+	/* If we fail to get the lock, don't bother with the TX BDs */
+	if (spin_trylock_irqsave(&priv->txlock, flags)) {
+		gfar_clean_tx_ring(dev);
+		spin_unlock_irqrestore(&priv->txlock, flags);
+	}
 
 	howmany = gfar_clean_rx_ring(dev, budget);