qcacmn: Fix for SMMU fault on reo cmd desc address

Fix a memory leak in dp_rx_tid_delete_wifi3() for the case
where sending the update cmd fails. Add the desc to the front
of the free desc list and set a flag so that the update cmd is
resent from the delete rx_tid callback. In that callback, send
out the update cmd for such descs first, then proceed to free
the remaining descs by sending a flush cache cmd for each of
them.

If the flush cache cmd cannot be sent to REO, add the REO
descriptor back to the free desc list and defer the deletion
to the callback function. This path is gated by the MCL
feature flag REO_DESC_DEFER_FREE.

Add a soc level stat to track REO cmd send failures.

Change-Id: I4e4cf47223d5325a53f2df22571895489d033ba3
CRs-Fixed: 2551028
diff --git a/dp/wifi3.0/dp_peer.c b/dp/wifi3.0/dp_peer.c
index 0ce3503..193ed75 100644
--- a/dp/wifi3.0/dp_peer.c
+++ b/dp/wifi3.0/dp_peer.c
@@ -2007,6 +2007,88 @@
 	return err;
 }
 
+#ifdef REO_DESC_DEFER_FREE
+/*
+ * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails,
+ * add the desc back to the freelist and defer the deletion
+ *
+ * @soc: DP SOC handle
+ * @desc: Base descriptor to be freed
+ * @reo_status: REO command status
+ */
+static void dp_reo_desc_clean_up(struct dp_soc *soc,
+				 struct reo_desc_list_node *desc,
+				 union hal_reo_status *reo_status)
+{
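+	/* Requeue; the delete callback will retry the flush later */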
+	desc->free_ts = qdf_get_system_timestamp();
+	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
+	qdf_list_insert_back(&soc->reo_desc_freelist,
+			     (qdf_list_node_t *)desc);
+}
+
+#else
+/*
+ * dp_reo_desc_clean_up() - If sending the REO cmd to flush the
+ * cache fails, free the base REO desc anyway
+ *
+ * @soc: DP SOC handle
+ * @desc: Base descriptor to be freed
+ * @reo_status: REO command status
+ */
+static void dp_reo_desc_clean_up(struct dp_soc *soc,
+				 struct reo_desc_list_node *desc,
+				 union hal_reo_status *reo_status)
+{
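+	/* Fake a success status so dp_reo_desc_free() still cleans up */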
+	if (reo_status) {
+		qdf_mem_zero(reo_status, sizeof(*reo_status));
+		reo_status->fl_cache_status.header.status = 0;
+		dp_reo_desc_free(soc, (void *)desc, reo_status);
+	}
+}
+#endif
+
+/*
+ * dp_resend_update_reo_cmd() - Resend the UPDATE_RX_REO_QUEUE
+ * cmd and re-insert the desc into the free list if the send fails.
+ *
+ * @soc: DP SOC handle
+ * @desc: desc with the resend update cmd flag set
+ * @rx_tid: RX tid of the desc, used to reset the valid field to 0
+ * in h/w
+ */
+static void dp_resend_update_reo_cmd(struct dp_soc *soc,
+				     struct reo_desc_list_node *desc,
+				     struct dp_rx_tid *rx_tid)
+{
+	struct hal_reo_cmd_params params;
+
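+	/* Rebuild the UPDATE_RX_REO_QUEUE cmd that clears the vld bit */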
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 1;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.upd_queue_params.update_vld = 1;
+	params.u.upd_queue_params.vld = 0;
+	desc->resend_update_reo_cmd = false;
+	/*
+	 * If the cmd send fails, set the resend_update_reo_cmd flag
+	 * and insert the desc at the end of the free list to retry.
+	 */
+	if (dp_reo_send_cmd(soc,
+			    CMD_UPDATE_RX_REO_QUEUE,
+			    &params,
+			    dp_rx_tid_delete_cb,
+			    (void *)desc)
+	    != QDF_STATUS_SUCCESS) {
+		desc->resend_update_reo_cmd = true;
+		desc->free_ts = qdf_get_system_timestamp();
+		qdf_list_insert_back(&soc->reo_desc_freelist,
+				     (qdf_list_node_t *)desc);
+		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
+	}
+}
+
 /*
  * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
  * after deleting the entries (ie., setting valid=0)
@@ -2015,8 +2097,8 @@
  * @cb_ctxt: Callback context
  * @reo_status: REO command status
  */
-static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
-	union hal_reo_status *reo_status)
+void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
+			 union hal_reo_status *reo_status)
 {
 	struct reo_desc_list_node *freedesc =
 		(struct reo_desc_list_node *)cb_ctxt;
@@ -2054,7 +2136,8 @@
 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
 		((list_size >= REO_DESC_FREELIST_SIZE) ||
-		((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
+		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
+		(desc->resend_update_reo_cmd && list_size))) {
 		struct dp_rx_tid *rx_tid;
 
 		qdf_list_remove_front(&soc->reo_desc_freelist,
@@ -2062,6 +2145,12 @@
 		list_size--;
 		rx_tid = &desc->rx_tid;
 
+		/* First process descs with resend_update_reo_cmd set */
+		if (desc->resend_update_reo_cmd) {
+			dp_resend_update_reo_cmd(soc, desc, rx_tid);
+			continue;
+		}
+
 		/* Flush and invalidate REO descriptor from HW cache: Base and
 		 * extension descriptors should be flushed separately */
 		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
@@ -2108,12 +2197,13 @@
 			 * TID queue desc also need to be freed accordingly.
 			 *
 			 * Here invoke desc_free function directly to do clean up.
+			 *
+			 * In the MCL path, add the desc back to the
+			 * free desc list and defer the deletion.
 			 */
 			dp_err_log("%s: fail to send REO cmd to flush cache: tid %d",
 				   __func__, rx_tid->tid);
-			qdf_mem_zero(&reo_status, sizeof(reo_status));
-			reo_status.fl_cache_status.header.status = 0;
-			dp_reo_desc_free(soc, (void *)desc, &reo_status);
+			dp_reo_desc_clean_up(soc, desc, &reo_status);
 		}
 	}
 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
@@ -2142,6 +2232,7 @@
 	}
 
 	freedesc->rx_tid = *rx_tid;
+	freedesc->resend_update_reo_cmd = false;
 
 	qdf_mem_zero(&params, sizeof(params));
 
@@ -2151,8 +2242,19 @@
 	params.u.upd_queue_params.update_vld = 1;
 	params.u.upd_queue_params.vld = 0;
 
-	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
-		dp_rx_tid_delete_cb, (void *)freedesc);
+	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
+			    dp_rx_tid_delete_cb, (void *)freedesc)
+		!= QDF_STATUS_SUCCESS) {
+		/* Defer the cleanup to the callback context */
+		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
+		freedesc->free_ts = qdf_get_system_timestamp();
+		freedesc->resend_update_reo_cmd = true;
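+		/* Queue at the head so the callback resends this cmd first */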
+		qdf_list_insert_front(&soc->reo_desc_freelist,
+				      (qdf_list_node_t *)freedesc);
+		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
+		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
+		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
+	}
 
 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
 	rx_tid->hw_qdesc_alloc_size = 0;
diff --git a/dp/wifi3.0/dp_peer.h b/dp/wifi3.0/dp_peer.h
index d67576d..ed1fb94 100644
--- a/dp/wifi3.0/dp_peer.h
+++ b/dp/wifi3.0/dp_peer.h
@@ -250,4 +250,16 @@
 				  bool is_rx_pkt_cap_enable,
 				  bool is_tx_pkt_cap_enable,
 				  uint8_t *peer_mac);
+
+/*
+ * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
+ * after deleting the entries (i.e., setting valid=0)
+ *
+ * @soc: DP SOC handle
+ * @cb_ctxt: Callback context
+ * @reo_status: REO command status
+ */
+void dp_rx_tid_delete_cb(struct dp_soc *soc,
+			 void *cb_ctxt,
+			 union hal_reo_status *reo_status);
 #endif /* _DP_PEER_H_ */
diff --git a/dp/wifi3.0/dp_stats.c b/dp/wifi3.0/dp_stats.c
index 8825af0..ec62d63 100644
--- a/dp/wifi3.0/dp_stats.c
+++ b/dp/wifi3.0/dp_stats.c
@@ -5757,6 +5757,8 @@
 				" %d", soc->stats.rx.err.reo_error[i]);
 	}
 	DP_PRINT_STATS("REO Error(0-14):%s", reo_error);
+	DP_PRINT_STATS("REO CMD SEND FAIL: %d",
+		       soc->stats.rx.err.reo_cmd_send_fail);
 }
 
 #ifdef FEATURE_TSO_STATS
diff --git a/dp/wifi3.0/dp_types.h b/dp/wifi3.0/dp_types.h
index 5df6e59..25466e8 100644
--- a/dp/wifi3.0/dp_types.h
+++ b/dp/wifi3.0/dp_types.h
@@ -654,6 +654,7 @@
 	qdf_list_node_t node;
 	unsigned long free_ts;
 	struct dp_rx_tid rx_tid;
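+	/* Set when the update cmd must be resent from the delete callback */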
+	bool resend_update_reo_cmd;
 };
 
 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
@@ -768,6 +769,8 @@
 			uint32_t hal_wbm_rel_dup;
 			/* HAL RXDMA error Duplicate count */
 			uint32_t hal_rxdma_err_dup;
+			/* REO cmd send fail/requeue count */
+			uint32_t reo_cmd_send_fail;
 		} err;
 
 		/* packet count per core - per ring */