qcacmn: Support oom recovery for rx CEs

When an rx CE runs out of buffers in an oom condition, the CE
deadlocks: the hw cannot copy any messages (no rx buffers can be
posted) and the sw will not initiate a pipe refill without a new
message.

When this happens, schedule work on a workqueue to kickstart the
cycle by allocating an nbuf and posting it to the CE.
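
For context, the recovery path reuses the qdf deferred-work calls
shown in the diff below; in outline (a sketch of the flow, not an
additional change to apply):

    /* one-time setup when the CE is initialized */
    qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
                    ce_oom_recovery, CE_state);

    /* refill failure path: every nbuf allocation failed, so the
     * ring is empty and nothing will trigger another refill */
    if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
        qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);

    /* ce_oom_recovery() re-runs hif_post_recv_buffers_for_pipe(),
     * which keeps rescheduling the work until at least one rx
     * descriptor is posted successfully */

    /* teardown in hif_ce_stop(), before the CE state is freed */
    qdf_destroy_work(scn->qdf_dev, &CE_state->oom_allocation_work);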

Change-Id: I047818145dbe230fefc447b6b034705f9b87ca70
CRs-Fixed: 2015778
diff --git a/hif/src/ce/ce_main.c b/hif/src/ce/ce_main.c
index dd3b38c..48417a5 100644
--- a/hif/src/ce/ce_main.c
+++ b/hif/src/ce/ce_main.c
@@ -839,6 +839,26 @@
 	return 0;
 }
 
+/**
+ * ce_oom_recovery() - try to recover rx ce from oom condition
+ * @context: CE_state of the CE with oom rx ring
+ *
+ * The work will continue to be rescheduled until at least one
+ * descriptor is successfully posted to the rx ring.
+ *
+ * Return: none
+ */
+static void ce_oom_recovery(void *context)
+{
+	struct CE_state *ce_state = context;
+	struct hif_softc *scn = ce_state->scn;
+	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
+	struct HIF_CE_pipe_info *pipe_info =
+		&ce_softc->pipe_info[ce_state->id];
+
+	hif_post_recv_buffers_for_pipe(pipe_info);
+}
+
 /*
  * Initialize a Copy Engine based on caller-supplied attributes.
  * This may be called once to initialize both source and destination
@@ -1062,6 +1082,9 @@
 			goto error_target_access;
 	}
 
+	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
+			ce_oom_recovery, CE_state);
+
 	/* update the htt_data attribute */
 	ce_mark_datapath(CE_state);
 
@@ -1744,11 +1767,16 @@
 	 *	there is no trigger to refill the ce and we will
 	 *	eventually crash
 	 */
-	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
+	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1) {
+
 		QDF_ASSERT(0);
+		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
+	}
 }
 
 
+
+
 static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
 {
 	struct CE_handle *ce_hdl;
@@ -1788,8 +1816,7 @@
 		 * CE_data = dma_map_single(dev, data, buf_sz, );
 		 * DMA_FROM_DEVICE);
 		 */
-		ret =
-			qdf_nbuf_map_single(scn->qdf_dev, nbuf,
+		ret = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
 					    QDF_DMA_FROM_DEVICE);
 
 		if (unlikely(ret != QDF_STATUS_SUCCESS)) {
@@ -2026,6 +2053,19 @@
 	hif_buffer_cleanup(hif_state);
 }
 
+static void hif_destroy_oom_work(struct hif_softc *scn)
+{
+	struct CE_state *ce_state;
+	int ce_id;
+
+	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
+		ce_state = scn->ce_id_to_state[ce_id];
+		if (ce_state)
+			qdf_destroy_work(scn->qdf_dev,
+					 &ce_state->oom_allocation_work);
+	}
+}
+
 void hif_ce_stop(struct hif_softc *scn)
 {
 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
@@ -2036,6 +2076,7 @@
 	 * bottom half contexts will not be re-entered
 	 */
 	hif_nointrs(scn);
+	hif_destroy_oom_work(scn);
 	scn->hif_init_done = false;
 
 	/*