i40e/i40evf: Move skb from i40e_rx_buffer to i40e_ring

This patch reduces the size of struct i40e_rx_buffer by one pointer
and brings the i40e driver a little closer to the igb driver in how it
handles packets that span multiple buffers.

We do this by moving the skb field from struct i40e_rx_buffer to
struct i40e_ring. We pass the skb we already have (or NULL if we
don't) to i40e_fetch_rx_buffer(), which skips the skb allocation when
one is already in progress for this packet, as sketched below.

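For illustration only, a minimal sketch of the allocation skip; the
helper names (__napi_alloc_skb(), I40E_RX_HDR_SIZE) follow the driver,
but the body is abbreviated rather than the patch's verbatim code:

	static struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
						    union i40e_rx_desc *rx_desc,
						    struct sk_buff *skb)
	{
		if (likely(!skb)) {
			/* No packet in progress: start a fresh skb. */
			skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
					       I40E_RX_HDR_SIZE,
					       GFP_ATOMIC | __GFP_NOWARN);
			if (unlikely(!skb))
				return NULL;
		}

		/* Add the current buffer's page to the new or resumed
		 * skb; that logic is unchanged by this patch and is
		 * elided here.
		 */
		return skb;
	}
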
Change-ID: I4ad48a531844494ba0c5d8e1a62209a057f661b0
Signed-off-by: Scott Peterson <scott.d.peterson@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index a5fc789..8274ba6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -239,7 +239,6 @@ struct i40e_tx_buffer {
 };
 
 struct i40e_rx_buffer {
-	struct sk_buff *skb;
 	dma_addr_t dma;
 	struct page *page;
 	unsigned int page_offset;
@@ -340,6 +339,14 @@ struct i40e_ring {
 
 	struct rcu_head rcu;		/* to avoid race on free */
 	u16 next_to_alloc;
+	struct sk_buff *skb;		/* When i40evf_clean_rx_ring_irq() must
+					 * return before it sees the EOP for
+					 * the current packet, we save that skb
+					 * here and resume receiving this
+					 * packet the next time
+					 * i40evf_clean_rx_ring_irq() is called
+					 * for this ring.
+					 */
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
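
The comment added above describes the save/resume behavior; a
condensed, illustrative version of the cleanup loop is shown below.
Descriptor-ready checks, stats and buffer recycling are elided, and
while helper names such as i40e_is_non_eop() follow the driver, the
body is a sketch, not the patch itself:

	static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
	{
		struct sk_buff *skb = rx_ring->skb;	/* resume partial packet */
		unsigned int total_rx_packets = 0;

		while (likely(total_rx_packets < (unsigned int)budget)) {
			union i40e_rx_desc *rx_desc;

			rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
			/* DD bit check and dma_rmb() elided */

			skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
			if (!skb)
				break;

			if (i40e_is_non_eop(rx_ring, rx_desc, skb))
				continue;	/* packet spans more buffers */

			napi_gro_receive(&rx_ring->q_vector->napi, skb);
			skb = NULL;		/* handed up the stack */
			total_rx_packets++;
		}

		/* Save any in-progress skb; the next call resumes it. */
		rx_ring->skb = skb;

		return total_rx_packets;
	}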