i40e: keep allocated memory in structs

Save both the pointer to the memory and its length, so that all the
information about an allocated region of kernel memory stays together.
This patch changes the adminq ring and buffer allocations to keep the
full i40e_dma_mem/i40e_virt_mem struct for every allocation, instead of
copying out the va/pa fields and discarding the struct.
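
For reference, the two structs pair the virtual address (and, for DMA
memory, the bus address) with the size of the allocation; roughly, as
defined in i40e_osdep.h:

	/* sketch of the structs this patch keeps around */
	struct i40e_dma_mem {
		void *va;	/* kernel virtual address */
		dma_addr_t pa;	/* bus/DMA address */
		u32 size;	/* length of the allocation */
	};

	struct i40e_virt_mem {
		void *va;	/* kernel virtual address */
		u32 size;	/* length of the allocation */
	};

With the whole struct stored in the adminq ring (the desc_buf, cmd_buf
and dma_head members used below), the free routines can hand the struct
straight back to i40e_free_dma_mem()/i40e_free_virt_mem() rather than
rebuilding a temporary i40e_virt_mem around a bare pointer.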

Change-Id: Ibcf96159aba4ba61f839d16d87d19478df28e630
Signed-off-by: David Cassard <david.g.cassard@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Kavindya Deegala <kavindya.s.deegala@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index c7e2866..d04ad3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -66,9 +66,8 @@
 static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
-	struct i40e_virt_mem mem;
 
-	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
 					 i40e_mem_atq_ring,
 					 (hw->aq.num_asq_entries *
 					 sizeof(struct i40e_aq_desc)),
@@ -76,21 +75,14 @@
 	if (ret_code)
 		return ret_code;
 
-	hw->aq.asq.desc = hw->aq.asq_mem.va;
-	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
-
-	ret_code = i40e_allocate_virt_mem(hw, &mem,
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
 					  (hw->aq.num_asq_entries *
 					  sizeof(struct i40e_asq_cmd_details)));
 	if (ret_code) {
-		i40e_free_dma_mem(hw, &hw->aq.asq_mem);
-		hw->aq.asq_mem.va = NULL;
-		hw->aq.asq_mem.pa = 0;
+		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 		return ret_code;
 	}
 
-	hw->aq.asq.details = mem.va;
-
 	return ret_code;
 }
 
@@ -102,16 +94,11 @@
 {
 	i40e_status ret_code;
 
-	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
 					 i40e_mem_arq_ring,
 					 (hw->aq.num_arq_entries *
 					 sizeof(struct i40e_aq_desc)),
 					 I40E_ADMINQ_DESC_ALIGNMENT);
-	if (ret_code)
-		return ret_code;
-
-	hw->aq.arq.desc = hw->aq.arq_mem.va;
-	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
 
 	return ret_code;
 }
@@ -125,14 +112,7 @@
  **/
 static void i40e_free_adminq_asq(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
-
-	i40e_free_dma_mem(hw, &hw->aq.asq_mem);
-	hw->aq.asq_mem.va = NULL;
-	hw->aq.asq_mem.pa = 0;
-	mem.va = hw->aq.asq.details;
-	i40e_free_virt_mem(hw, &mem);
-	hw->aq.asq.details = NULL;
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 }
 
 /**
@@ -144,9 +124,7 @@
  **/
 static void i40e_free_adminq_arq(struct i40e_hw *hw)
 {
-	i40e_free_dma_mem(hw, &hw->aq.arq_mem);
-	hw->aq.arq_mem.va = NULL;
-	hw->aq.arq_mem.pa = 0;
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
 }
 
 /**
@@ -157,7 +135,6 @@
 {
 	i40e_status ret_code;
 	struct i40e_aq_desc *desc;
-	struct i40e_virt_mem mem;
 	struct i40e_dma_mem *bi;
 	int i;
 
@@ -166,11 +143,11 @@
 	 */
 
 	/* buffer_info structures do not need alignment */
-	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
-					  sizeof(struct i40e_dma_mem)));
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
 	if (ret_code)
 		goto alloc_arq_bufs;
-	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
 
 	/* allocate the mapped buffers */
 	for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -212,8 +189,7 @@
 	i--;
 	for (; i >= 0; i--)
 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
-	mem.va = hw->aq.arq.r.arq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
 
 	return ret_code;
 }
@@ -225,16 +201,15 @@
 static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
-	struct i40e_virt_mem mem;
 	struct i40e_dma_mem *bi;
 	int i;
 
 	/* No mapped memory needed yet, just the buffer info structures */
-	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
-					  sizeof(struct i40e_dma_mem)));
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
 	if (ret_code)
 		goto alloc_asq_bufs;
-	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
 
 	/* allocate the mapped buffers */
 	for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -254,8 +229,7 @@
 	i--;
 	for (; i >= 0; i--)
 		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
-	mem.va = hw->aq.asq.r.asq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
 
 	return ret_code;
 }
@@ -266,14 +240,17 @@
  **/
 static void i40e_free_arq_bufs(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
 	int i;
 
+	/* free descriptors */
 	for (i = 0; i < hw->aq.num_arq_entries; i++)
 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
 
-	mem.va = hw->aq.arq.r.arq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
 }
 
 /**
@@ -282,7 +259,6 @@
  **/
 static void i40e_free_asq_bufs(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
 	int i;
 
 	/* only unmap if the address is non-NULL */
@@ -290,9 +266,14 @@
 		if (hw->aq.asq.r.asq_bi[i].pa)
 			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
 
-	/* now free the buffer info list */
-	mem.va = hw->aq.asq.r.asq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	/* free the buffer info list */
+	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
 }
 
 /**
@@ -305,14 +286,18 @@
 {
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the transmit queue */
-		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
-		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_VF_ATQBAH1,
+		    upper_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_VF_ATQBAL1,
+		    lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
 					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
 	} else {
 		/* configure the transmit queue */
-		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
-		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_PF_ATQBAH,
+		    upper_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_PF_ATQBAL,
+		    lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
 					  I40E_PF_ATQLEN_ATQENABLE_MASK));
 	}
@@ -328,14 +313,18 @@
 {
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the receive queue */
-		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
-		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_VF_ARQBAH1,
+		    upper_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_VF_ARQBAL1,
+		    lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
 					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
 	} else {
 		/* configure the receive queue */
-		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
-		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_PF_ARQBAH,
+		    upper_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_PF_ARQBAL,
+		    lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
 					  I40E_PF_ARQLEN_ARQENABLE_MASK));
 	}
@@ -483,8 +472,6 @@
 
 	/* free ring buffers */
 	i40e_free_asq_bufs(hw);
-	/* free the ring descriptors */
-	i40e_free_adminq_asq(hw);
 
 	mutex_unlock(&hw->aq.asq_mutex);
 
@@ -516,8 +503,6 @@
 
 	/* free ring buffers */
 	i40e_free_arq_bufs(hw);
-	/* free the ring descriptors */
-	i40e_free_adminq_arq(hw);
 
 	mutex_unlock(&hw->aq.arq_mutex);