igb: move alloc_failed and csum_err stats into per rx-ring stat

The allocation failure and checksum error stats are currently kept as
global counters in the adapter structure.  If we end up allocating the
queues to multiple netdevs then the global counters don't make much
sense.  For this reason I felt it necessary to move the
alloc_rx_buff_failed and hw_csum_err stats into the rx_stats portion of
the rx_ring; the restart_queue counter moves into the per tx-ring stats
for the same reason.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
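
Note: the matching struct changes in drivers/net/igb/igb.h are not part of the
hunks below.  As a minimal, hedged sketch of the per-ring counter layout this
patch assumes, the following userspace-compilable C mirrors the counters
touched in the diff; the field names are taken from the usage below, and the
exact upstream definitions and types (kernel u64) may differ.

/*
 * Illustrative sketch only: the real per-ring stats structs live in
 * drivers/net/igb/igb.h and use kernel u64 types.
 */
#include <stdint.h>

struct igb_tx_queue_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t restart_queue;		/* was adapter->restart_queue */
};

struct igb_rx_queue_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t drops;
	uint64_t csum_err;		/* was adapter->hw_csum_err */
	uint64_t alloc_failed;		/* was adapter->alloc_rx_buff_failed */
};

/*
 * With per-ring counters, a device-wide total (e.g. for ethtool or
 * igb_update_stats()) is recovered by summing across the rings.
 */
static uint64_t sum_rx_alloc_failed(const struct igb_rx_queue_stats *stats,
				    int num_rings)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < num_rings; i++)
		total += stats[i].alloc_failed;
	return total;
}
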
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 04e860d..bdd7bf0 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3562,8 +3562,6 @@
 static int __igb_maybe_stop_tx(struct net_device *netdev,
 			       struct igb_ring *tx_ring, int size)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
-
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
 
 	/* Herbert's original patch had:
@@ -3578,7 +3576,7 @@
 
 	/* A reprieve! */
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-	++adapter->restart_queue;
+	tx_ring->tx_stats.restart_queue++;
 	return 0;
 }
 
@@ -4734,7 +4732,7 @@
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			++adapter->restart_queue;
+			tx_ring->tx_stats.restart_queue++;
 		}
 	}
 
@@ -4801,7 +4799,8 @@
 		napi_gro_receive(&q_vector->napi, skb);
 }
 
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
+				       struct igb_adapter *adapter,
 				       u32 status_err, struct sk_buff *skb)
 {
 	skb->ip_summed = CHECKSUM_NONE;
@@ -4820,7 +4819,7 @@
 		 */
 		if (!((adapter->hw.mac.type == e1000_82576) &&
 		      (skb->len == 60)))
-			adapter->hw_csum_err++;
+			ring->rx_stats.csum_err++;
 		/* let the stack verify checksum errors */
 		return;
 	}
@@ -4979,7 +4978,7 @@
 		total_bytes += skb->len;
 		total_packets++;
 
-		igb_rx_checksum_adv(adapter, staterr, skb);
+		igb_rx_checksum_adv(rx_ring, adapter, staterr, skb);
 
 		skb->protocol = eth_type_trans(skb, netdev);
 		skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -5046,7 +5045,7 @@
 			if (!buffer_info->page) {
 				buffer_info->page = alloc_page(GFP_ATOMIC);
 				if (!buffer_info->page) {
-					adapter->alloc_rx_buff_failed++;
+					rx_ring->rx_stats.alloc_failed++;
 					goto no_buffers;
 				}
 				buffer_info->page_offset = 0;
@@ -5063,7 +5062,7 @@
 		if (!buffer_info->skb) {
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			if (!skb) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_failed++;
 				goto no_buffers;
 			}