qcacld-3.0: Separate out different flow control implementations

Separate the QCA_LL_LEGACY_TX_FLOW_CONTROL and
QCA_LL_TX_FLOW_CONTROL_V2 flow control implementations into
different files so that each feature can be compiled out cleanly.
The legacy implementation moves to the new
ol_txrx_legacy_flow_control.c, built only when
CONFIG_WLAN_TX_FLOW_CONTROL_LEGACY is enabled, while the V2
implementation is consolidated in ol_txrx_flow_control.c.
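
Callers stay unchanged: when a flow control flavor is disabled, its
header provides an empty static inline stub in place of the extern
prototype, e.g. for ol_txrx_vdev_flush() in ol_tx_queue.h:

    #if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
    static inline
    void ol_txrx_vdev_flush(struct cdp_vdev *data_vdev)
    {
    }
    #else
    void ol_txrx_vdev_flush(struct cdp_vdev *pvdev);
    #endif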

Change-Id: I5d6ddf9ea61b409b25d242852ed1f0102e94ad88
CRs-Fixed: 2228902
diff --git a/Kbuild b/Kbuild
index 153acb1..de909f1 100644
--- a/Kbuild
+++ b/Kbuild
@@ -884,6 +884,10 @@
 TXRX_OBJS +=     $(TXRX_DIR)/ol_txrx_flow_control.o
 endif
 
+ifeq ($(CONFIG_WLAN_TX_FLOW_CONTROL_LEGACY), y)
+TXRX_OBJS +=     $(TXRX_DIR)/ol_txrx_legacy_flow_control.o
+endif
+
 ifeq ($(CONFIG_IPA_OFFLOAD), y)
 TXRX_OBJS +=     $(TXRX_DIR)/ol_txrx_ipa.o
 endif
diff --git a/core/dp/txrx/ol_tx.c b/core/dp/txrx/ol_tx.c
index f47ecdb..e335a7c 100644
--- a/core/dp/txrx/ol_tx.c
+++ b/core/dp/txrx/ol_tx.c
@@ -356,294 +356,6 @@
 }
 #endif
 
-#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
-
-#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
-#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
-static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
-{
-	int max_to_accept;
-
-	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
-	if (vdev->ll_pause.paused_reason) {
-		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-		return;
-	}
-
-	/*
-	 * Send as much of the backlog as possible, but leave some margin
-	 * of unallocated tx descriptors that can be used for new frames
-	 * being transmitted by other vdevs.
-	 * Ideally there would be a scheduler, which would not only leave
-	 * some margin for new frames for other vdevs, but also would
-	 * fairly apportion the tx descriptors between multiple vdevs that
-	 * have backlogs in their pause queues.
-	 * However, the fairness benefit of having a scheduler for frames
-	 * from multiple vdev's pause queues is not sufficient to outweigh
-	 * the extra complexity.
-	 */
-	max_to_accept = vdev->pdev->tx_desc.num_free -
-		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
-	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
-		qdf_nbuf_t tx_msdu;
-
-		max_to_accept--;
-		vdev->ll_pause.txq.depth--;
-		tx_msdu = vdev->ll_pause.txq.head;
-		if (tx_msdu) {
-			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
-			if (NULL == vdev->ll_pause.txq.head)
-				vdev->ll_pause.txq.tail = NULL;
-			qdf_nbuf_set_next(tx_msdu, NULL);
-			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
-						QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
-			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
-			/*
-			 * It is unexpected that ol_tx_ll would reject the frame
-			 * since we checked that there's room for it, though
-			 * there's an infinitesimal possibility that between the
-			 * time we checked the room available and now, a
-			 * concurrent batch of tx frames used up all the room.
-			 * For simplicity, just drop the frame.
-			 */
-			if (tx_msdu) {
-				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
-					       QDF_DMA_TO_DEVICE);
-				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
-			}
-		}
-	}
-	if (vdev->ll_pause.txq.depth) {
-		qdf_timer_stop(&vdev->ll_pause.timer);
-		if (!qdf_atomic_read(&vdev->delete.detaching)) {
-			qdf_timer_start(&vdev->ll_pause.timer,
-					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
-			vdev->ll_pause.is_q_timer_on = true;
-		}
-		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
-			vdev->ll_pause.q_overflow_cnt++;
-	}
-
-	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-}
-
-static qdf_nbuf_t
-ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
-			      qdf_nbuf_t msdu_list, uint8_t start_timer)
-{
-	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
-	while (msdu_list &&
-	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
-		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
-
-		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
-					     QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
-		DPTRACE(qdf_dp_trace(msdu_list,
-				QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
-				QDF_TRACE_DEFAULT_PDEV_ID,
-				qdf_nbuf_data_addr(msdu_list),
-				sizeof(qdf_nbuf_data(msdu_list)), QDF_TX));
-
-		vdev->ll_pause.txq.depth++;
-		if (!vdev->ll_pause.txq.head) {
-			vdev->ll_pause.txq.head = msdu_list;
-			vdev->ll_pause.txq.tail = msdu_list;
-		} else {
-			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
-		}
-		vdev->ll_pause.txq.tail = msdu_list;
-
-		msdu_list = next;
-	}
-	if (vdev->ll_pause.txq.tail)
-		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);
-
-	if (start_timer) {
-		qdf_timer_stop(&vdev->ll_pause.timer);
-		if (!qdf_atomic_read(&vdev->delete.detaching)) {
-			qdf_timer_start(&vdev->ll_pause.timer,
-					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
-			vdev->ll_pause.is_q_timer_on = true;
-		}
-	}
-	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-
-	return msdu_list;
-}
-
-/*
- * Store up the tx frame in the vdev's tx queue if the vdev is paused.
- * If there are too many frames in the tx queue, reject it.
- */
-qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
-{
-	uint16_t eth_type;
-	uint32_t paused_reason;
-
-	if (msdu_list == NULL)
-		return NULL;
-
-	paused_reason = vdev->ll_pause.paused_reason;
-	if (paused_reason) {
-		if (qdf_unlikely((paused_reason &
-				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
-				 paused_reason)) {
-			eth_type = (((struct ethernet_hdr_t *)
-				     qdf_nbuf_data(msdu_list))->
-				    ethertype[0] << 8) |
-				   (((struct ethernet_hdr_t *)
-				     qdf_nbuf_data(msdu_list))->ethertype[1]);
-			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
-				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
-				return msdu_list;
-			}
-		}
-		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
-	} else {
-		if (vdev->ll_pause.txq.depth > 0 ||
-		    vdev->pdev->tx_throttle.current_throttle_level !=
-		    THROTTLE_LEVEL_0) {
-			/*
-			 * not paused, but there is a backlog of frms
-			 * from a prior pause or throttle off phase
-			 */
-			msdu_list = ol_tx_vdev_pause_queue_append(
-				vdev, msdu_list, 0);
-			/*
-			 * if throttle is disabled or phase is "on",
-			 * send the frame
-			 */
-			if (vdev->pdev->tx_throttle.current_throttle_level ==
-			    THROTTLE_LEVEL_0 ||
-			    vdev->pdev->tx_throttle.current_throttle_phase ==
-			    THROTTLE_PHASE_ON) {
-				/*
-				 * send as many frames as possible
-				 * from the vdevs backlog
-				 */
-				ol_tx_vdev_ll_pause_queue_send_base(vdev);
-			}
-		} else {
-			/*
-			 * not paused, no throttle and no backlog -
-			 * send the new frames
-			 */
-			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
-		}
-	}
-	return msdu_list;
-}
-
-/*
- * Run through the transmit queues for all the vdevs and
- * send the pending frames
- */
-void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
-{
-	int max_to_send;        /* tracks how many frames have been sent */
-	qdf_nbuf_t tx_msdu;
-	struct ol_txrx_vdev_t *vdev = NULL;
-	uint8_t more;
-
-	if (NULL == pdev)
-		return;
-
-	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
-		return;
-
-	/* ensure that we send no more than tx_threshold frames at once */
-	max_to_send = pdev->tx_throttle.tx_threshold;
-
-	/* round robin through the vdev queues for the given pdev */
-
-	/*
-	 * Potential improvement: download several frames from the same vdev
-	 * at a time, since it is more likely that those frames could be
-	 * aggregated together, remember which vdev was serviced last,
-	 * so the next call this function can resume the round-robin
-	 * traversing where the current invocation left off
-	 */
-	do {
-		more = 0;
-		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
-
-			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
-			if (vdev->ll_pause.txq.depth) {
-				if (vdev->ll_pause.paused_reason) {
-					qdf_spin_unlock_bh(&vdev->ll_pause.
-							   mutex);
-					continue;
-				}
-
-				tx_msdu = vdev->ll_pause.txq.head;
-				if (NULL == tx_msdu) {
-					qdf_spin_unlock_bh(&vdev->ll_pause.
-							   mutex);
-					continue;
-				}
-
-				max_to_send--;
-				vdev->ll_pause.txq.depth--;
-
-				vdev->ll_pause.txq.head =
-					qdf_nbuf_next(tx_msdu);
-
-				if (NULL == vdev->ll_pause.txq.head)
-					vdev->ll_pause.txq.tail = NULL;
-
-				qdf_nbuf_set_next(tx_msdu, NULL);
-				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
-				/*
-				 * It is unexpected that ol_tx_ll would reject
-				 * the frame, since we checked that there's
-				 * room for it, though there's an infinitesimal
-				 * possibility that between the time we checked
-				 * the room available and now, a concurrent
-				 * batch of tx frames used up all the room.
-				 * For simplicity, just drop the frame.
-				 */
-				if (tx_msdu) {
-					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
-						       QDF_DMA_TO_DEVICE);
-					qdf_nbuf_tx_free(tx_msdu,
-							 QDF_NBUF_PKT_ERROR);
-				}
-			}
-			/*check if there are more msdus to transmit */
-			if (vdev->ll_pause.txq.depth)
-				more = 1;
-			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-		}
-	} while (more && max_to_send);
-
-	vdev = NULL;
-	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
-		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
-		if (vdev->ll_pause.txq.depth) {
-			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
-			qdf_timer_start(
-				&pdev->tx_throttle.tx_timer,
-				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
-			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-			return;
-		}
-		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-	}
-}
-
-void ol_tx_vdev_ll_pause_queue_send(void *context)
-{
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-
-	if (pdev &&
-	    pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
-	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
-		return;
-	ol_tx_vdev_ll_pause_queue_send_base(vdev);
-}
-#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
-
 static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
 {
 	return
diff --git a/core/dp/txrx/ol_tx_queue.c b/core/dp/txrx/ol_tx_queue.c
index efbcf4c..dc8a365 100644
--- a/core/dp/txrx/ol_tx_queue.c
+++ b/core/dp/txrx/ol_tx_queue.c
@@ -1695,208 +1695,7 @@
 
 #endif /* defined(CONFIG_HL_SUPPORT) */
 
-#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
-
-/**
- * ol_txrx_vdev_pause- Suspend all tx data for the specified virtual device
- *
- * @data_vdev - the virtual device being paused
- * @reason - the reason for which vdev queue is getting paused
- *
- * This function applies primarily to HL systems, but also
- * applies to LL systems that use per-vdev tx queues for MCC or
- * thermal throttling. As an example, this function could be
- * used when a single-channel physical device supports multiple
- * channels by jumping back and forth between the channels in a
- * time-shared manner.  As the device is switched from channel A
- * to channel B, the virtual devices that operate on channel A
- * will be paused.
- *
- */
-void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
-{
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-
-	/* TO DO: log the queue pause */
-	/* acquire the mutex lock, since we'll be modifying the queues */
-	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
-
-	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
-	vdev->ll_pause.paused_reason |= reason;
-	vdev->ll_pause.q_pause_cnt++;
-	vdev->ll_pause.is_q_paused = true;
-	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-
-	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
-}
-
-/**
- * ol_txrx_vdev_unpause - Resume tx for the specified virtual device
- *
- * @data_vdev - the virtual device being unpaused
- * @reason - the reason for which vdev queue is getting unpaused
- *
- * This function applies primarily to HL systems, but also applies to
- * LL systems that use per-vdev tx queues for MCC or thermal throttling.
- *
- */
-void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
-{
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-	/* TO DO: log the queue unpause */
-	/* acquire the mutex lock, since we'll be modifying the queues */
-	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
-
-	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
-	if (vdev->ll_pause.paused_reason & reason) {
-		vdev->ll_pause.paused_reason &= ~reason;
-		if (!vdev->ll_pause.paused_reason) {
-			vdev->ll_pause.is_q_paused = false;
-			vdev->ll_pause.q_unpause_cnt++;
-			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-			ol_tx_vdev_ll_pause_queue_send((unsigned long) vdev);
-		} else {
-			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-		}
-	} else {
-		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-	}
-	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
-}
-
-/**
- * ol_txrx_vdev_flush - Drop all tx data for the specified virtual device
- *
- * @data_vdev - the virtual device being flushed
- *
- *  This function applies primarily to HL systems, but also applies to
- *  LL systems that use per-vdev tx queues for MCC or thermal throttling.
- *  This function would typically be used by the ctrl SW after it parks
- *  a STA vdev and then resumes it, but to a new AP.  In this case, though
- *  the same vdev can be used, any old tx frames queued inside it would be
- *  stale, and would need to be discarded.
- *
- */
-void ol_txrx_vdev_flush(struct cdp_vdev *pvdev)
-{
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-
-	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
-	qdf_timer_stop(&vdev->ll_pause.timer);
-	vdev->ll_pause.is_q_timer_on = false;
-	while (vdev->ll_pause.txq.head) {
-		qdf_nbuf_t next =
-			qdf_nbuf_next(vdev->ll_pause.txq.head);
-		qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
-		if (QDF_NBUF_CB_PADDR(vdev->ll_pause.txq.head) &&
-			!qdf_nbuf_ipa_owned_get(vdev->ll_pause.txq.head)) {
-			qdf_nbuf_unmap(vdev->pdev->osdev,
-				       vdev->ll_pause.txq.head,
-				       QDF_DMA_TO_DEVICE);
-		}
-		qdf_nbuf_tx_free(vdev->ll_pause.txq.head,
-				 QDF_NBUF_PKT_ERROR);
-		vdev->ll_pause.txq.head = next;
-	}
-	vdev->ll_pause.txq.tail = NULL;
-	vdev->ll_pause.txq.depth = 0;
-	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-}
-#endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */
-
-#if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
-void ol_txrx_vdev_flush(struct cdp_vdev *data_vdev)
-{
-}
-#endif
-
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
-#ifndef CONFIG_ICNSS
-
-/**
- * ol_txrx_map_to_netif_reason_type() - map to netif_reason_type
- * @reason: reason
- *
- * Return: netif_reason_type
- */
-static enum netif_reason_type
-ol_txrx_map_to_netif_reason_type(uint32_t reason)
-{
-	switch (reason) {
-	case OL_TXQ_PAUSE_REASON_FW:
-		return WLAN_FW_PAUSE;
-	case OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED:
-		return WLAN_PEER_UNAUTHORISED;
-	case OL_TXQ_PAUSE_REASON_TX_ABORT:
-		return WLAN_TX_ABORT;
-	case OL_TXQ_PAUSE_REASON_VDEV_STOP:
-		return WLAN_VDEV_STOP;
-	case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
-		return WLAN_THERMAL_MITIGATION;
-	default:
-		ol_txrx_err(
-			   "%s: reason not supported %d\n",
-			   __func__, reason);
-		return WLAN_REASON_TYPE_MAX;
-	}
-}
-
-/**
- * ol_txrx_vdev_pause() - pause vdev network queues
- * @vdev: vdev handle
- * @reason: reason
- *
- * Return: none
- */
-void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
-{
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-	enum netif_reason_type netif_reason;
-
-	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
-		ol_txrx_err("%s: invalid pdev\n", __func__);
-		return;
-	}
-
-	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
-	if (netif_reason == WLAN_REASON_TYPE_MAX)
-		return;
-
-	pdev->pause_cb(vdev->vdev_id, WLAN_STOP_ALL_NETIF_QUEUE, netif_reason);
-}
-
-/**
- * ol_txrx_vdev_unpause() - unpause vdev network queues
- * @vdev: vdev handle
- * @reason: reason
- *
- * Return: none
- */
-void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
-{
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-	enum netif_reason_type netif_reason;
-
-	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
-		ol_txrx_err(
-				   "%s: invalid pdev\n", __func__);
-		return;
-	}
-
-	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
-	if (netif_reason == WLAN_REASON_TYPE_MAX)
-		return;
-
-	pdev->pause_cb(vdev->vdev_id, WLAN_WAKE_ALL_NETIF_QUEUE,
-			netif_reason);
-
-}
-#endif /* ifndef CONFIG_ICNSS */
-#endif /* ifdef QCA_LL_TX_FLOW_CONTROL_V2 */
-
-#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
+#if defined(CONFIG_HL_SUPPORT)
 
 /**
  * ol_txrx_pdev_pause() - pause network queues for each vdev
@@ -2035,7 +1834,7 @@
 }
 
 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
-static void ol_tx_pdev_throttle_tx_timer(unsigned long context)
+static void ol_tx_pdev_throttle_tx_timer(void *context)
 {
 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
 
diff --git a/core/dp/txrx/ol_tx_queue.h b/core/dp/txrx/ol_tx_queue.h
index 4ccc6e7..deb9b84 100644
--- a/core/dp/txrx/ol_tx_queue.h
+++ b/core/dp/txrx/ol_tx_queue.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -227,10 +227,17 @@
 }
 #endif /* defined(CONFIG_HL_SUPPORT) */
 
+#if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
+static inline
+void ol_txrx_vdev_flush(struct cdp_vdev *data_vdev)
+{
+}
+#else
 void ol_txrx_vdev_flush(struct cdp_vdev *pvdev);
+#endif
 
 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
-   (defined(QCA_LL_TX_FLOW_CONTROL_V2) && !defined(CONFIG_ICNSS)) || \
+   (defined(QCA_LL_TX_FLOW_CONTROL_V2)) || \
    defined(CONFIG_HL_SUPPORT)
 void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason);
 void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason);
diff --git a/core/dp/txrx/ol_tx_send.c b/core/dp/txrx/ol_tx_send.c
index 92b04b0..ce9c042 100644
--- a/core/dp/txrx/ol_tx_send.c
+++ b/core/dp/txrx/ol_tx_send.c
@@ -105,65 +105,8 @@
 }
 #endif
 
-#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
-void ol_txrx_flow_control_cb(struct cdp_vdev *pvdev, bool tx_resume)
-{
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-
-	qdf_spin_lock_bh(&vdev->flow_control_lock);
-	if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
-		vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
-	qdf_spin_unlock_bh(&vdev->flow_control_lock);
-
-	return;
-}
-
-/**
- * ol_txrx_flow_control_is_pause() - is osif paused by flow control
- * @vdev: vdev handle
- *
- * Return: true if osif is paused by flow control
- */
-static bool ol_txrx_flow_control_is_pause(ol_txrx_vdev_handle vdev)
-{
-	bool is_pause = false;
-
-	if ((vdev->osif_flow_control_is_pause) && (vdev->osif_fc_ctx))
-		is_pause = vdev->osif_flow_control_is_pause(vdev->osif_fc_ctx);
-
-	return is_pause;
-}
-
-/**
- * ol_tx_flow_ct_unpause_os_q() - Unpause OS Q
- * @pdev: physical device object
- *
- *
- * Return: None
- */
-static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
-{
-	struct ol_txrx_vdev_t *vdev;
-
-	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
-		if ((qdf_atomic_read(&vdev->os_q_paused) &&
-		    (vdev->tx_fl_hwm != 0)) ||
-		    ol_txrx_flow_control_is_pause(vdev)) {
-			qdf_spin_lock(&pdev->tx_mutex);
-			if (pdev->tx_desc.num_free > vdev->tx_fl_hwm) {
-				qdf_atomic_set(&vdev->os_q_paused, 0);
-				qdf_spin_unlock(&pdev->tx_mutex);
-				ol_txrx_flow_control_cb((struct cdp_vdev *)vdev,
-						true);
-			} else {
-				qdf_spin_unlock(&pdev->tx_mutex);
-			}
-		}
-	}
-}
-#elif defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)
-
-static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
+#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)
+void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
 {
 	struct ol_txrx_vdev_t *vdev;
 
@@ -185,12 +128,7 @@
 		}
 	}
 }
-#else
-
-static inline void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
-{
-}
-#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+#endif
 
 static inline uint16_t
 ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
diff --git a/core/dp/txrx/ol_tx_send.h b/core/dp/txrx/ol_tx_send.h
index 673c2fc..9fdeaba 100644
--- a/core/dp/txrx/ol_tx_send.h
+++ b/core/dp/txrx/ol_tx_send.h
@@ -149,4 +149,12 @@
  */
 void ol_txrx_flow_control_cb(struct cdp_vdev *vdev, bool tx_resume);
 
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || (defined(CONFIG_HL_SUPPORT) && \
+	 defined(CONFIG_PER_VDEV_TX_DESC_POOL))
+void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev);
+#else
+static inline void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
+{
+}
+#endif
 #endif /* _OL_TX_SEND__H_ */
diff --git a/core/dp/txrx/ol_txrx.c b/core/dp/txrx/ol_txrx.c
index 3378baf..be58266 100644
--- a/core/dp/txrx/ol_txrx.c
+++ b/core/dp/txrx/ol_txrx.c
@@ -85,15 +85,6 @@
 				QDF_FILE_GRP_READ |	\
 				QDF_FILE_OTH_READ)
 
-#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
-ol_txrx_peer_handle
-ol_txrx_peer_find_by_local_id(struct cdp_pdev *pdev,
-			      uint8_t local_peer_id);
-ol_txrx_peer_handle
-ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
-			      uint8_t local_peer_id,
-			      enum peer_debug_id_type dbg_id);
-#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
 QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
 				     uint8_t *peer_mac,
 				     enum ol_txrx_peer_state state);
@@ -707,92 +698,6 @@
 }
 #endif
 
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
-/**
- * ol_tx_set_desc_global_pool_size() - set global pool size
- * @num_msdu_desc: total number of descriptors
- *
- * Return: none
- */
-static void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
-{
-	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-
-	if (!pdev) {
-		qdf_print("%s: pdev is NULL\n", __func__);
-		return;
-	}
-	pdev->num_msdu_desc = num_msdu_desc;
-	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
-		pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
-	ol_txrx_info_high("Global pool size: %d\n",
-		pdev->num_msdu_desc);
-}
-
-/**
- * ol_tx_get_desc_global_pool_size() - get global pool size
- * @pdev: pdev handle
- *
- * Return: global pool size
- */
-static inline
-uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
-{
-	return pdev->num_msdu_desc;
-}
-
-/**
- * ol_tx_get_total_free_desc() - get total free descriptors
- * @pdev: pdev handle
- *
- * Return: total free descriptors
- */
-static inline
-uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
-{
-	struct ol_tx_flow_pool_t *pool = NULL;
-	uint32_t free_desc;
-
-	free_desc = pdev->tx_desc.num_free;
-	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
-	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
-					 flow_pool_list_elem) {
-		qdf_spin_lock_bh(&pool->flow_pool_lock);
-		free_desc += pool->avail_desc;
-		qdf_spin_unlock_bh(&pool->flow_pool_lock);
-	}
-	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
-
-	return free_desc;
-}
-
-#else
-/**
- * ol_tx_get_desc_global_pool_size() - get global pool size
- * @pdev: pdev handle
- *
- * Return: global pool size
- */
-static inline
-uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
-{
-	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
-}
-
-/**
- * ol_tx_get_total_free_desc() - get total free descriptors
- * @pdev: pdev handle
- *
- * Return: total free descriptors
- */
-static inline
-uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
-{
-	return pdev->tx_desc.num_free;
-}
-
-#endif
-
 #if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)
 
 /**
@@ -4823,169 +4728,6 @@
 	return vdev->txrx_stats.txack_success;
 }
 
-#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
-
-/**
- * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
- * @sta_id: sta_id
- *
- * Return: vdev handle
- *            NULL if not found.
- */
-static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
-{
-	struct ol_txrx_peer_t *peer = NULL;
-	ol_txrx_pdev_handle pdev = NULL;
-
-	if (sta_id >= WLAN_MAX_STA_COUNT) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "Invalid sta id passed");
-		return NULL;
-	}
-
-	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-	if (!pdev) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "PDEV not found for sta_id [%d]", sta_id);
-		return NULL;
-	}
-
-	peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
-
-	if (!peer) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
-			  "PEER [%d] not found", sta_id);
-		return NULL;
-	}
-
-	return peer->vdev;
-}
-
-/**
- * ol_txrx_register_tx_flow_control() - register tx flow control callback
- * @vdev_id: vdev_id
- * @flowControl: flow control callback
- * @osif_fc_ctx: callback context
- * @flow_control_is_pause: is vdev paused by flow control
- *
- * Return: 0 for success or error code
- */
-static int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
-	ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
-	ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause)
-{
-	struct ol_txrx_vdev_t *vdev =
-		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
-
-	if (NULL == vdev) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "%s: Invalid vdev_id %d", __func__, vdev_id);
-		return -EINVAL;
-	}
-
-	qdf_spin_lock_bh(&vdev->flow_control_lock);
-	vdev->osif_flow_control_cb = flowControl;
-	vdev->osif_flow_control_is_pause = flow_control_is_pause;
-	vdev->osif_fc_ctx = osif_fc_ctx;
-	qdf_spin_unlock_bh(&vdev->flow_control_lock);
-	return 0;
-}
-
-/**
- * ol_txrx_de_register_tx_flow_control_cb() - deregister tx flow control
- *                                            callback
- * @vdev_id: vdev_id
- *
- * Return: 0 for success or error code
- */
-static int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
-{
-	struct ol_txrx_vdev_t *vdev =
-		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
-
-	if (NULL == vdev) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "%s: Invalid vdev_id", __func__);
-		return -EINVAL;
-	}
-
-	qdf_spin_lock_bh(&vdev->flow_control_lock);
-	vdev->osif_flow_control_cb = NULL;
-	vdev->osif_flow_control_is_pause = NULL;
-	vdev->osif_fc_ctx = NULL;
-	qdf_spin_unlock_bh(&vdev->flow_control_lock);
-	return 0;
-}
-
-/**
- * ol_txrx_get_tx_resource() - if tx resource less than low_watermark
- * @sta_id: sta id
- * @low_watermark: low watermark
- * @high_watermark_offset: high watermark offset value
- *
- * Return: true/false
- */
-static bool
-ol_txrx_get_tx_resource(uint8_t sta_id,
-			unsigned int low_watermark,
-			unsigned int high_watermark_offset)
-{
-	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
-
-	if (NULL == vdev) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
-			  "%s: Invalid sta_id %d", __func__, sta_id);
-		/* Return true so caller do not understand that resource
-		 * is less than low_watermark.
-		 * sta_id validation will be done in ol_tx_send_data_frame
-		 * and if sta_id is not registered then host will drop
-		 * packet.
-		 */
-		return true;
-	}
-
-	qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
-
-	if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
-		vdev->tx_fl_lwm = (uint16_t) low_watermark;
-		vdev->tx_fl_hwm =
-			(uint16_t) (low_watermark + high_watermark_offset);
-		/* Not enough free resource, stop TX OS Q */
-		qdf_atomic_set(&vdev->os_q_paused, 1);
-		qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
-		return false;
-	}
-	qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
-	return true;
-}
-
-/**
- * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
- * @vdev_id: vdev id
- * @pause_q_depth: pause queue depth
- *
- * Return: 0 for success or error code
- */
-static int
-ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
-{
-	struct ol_txrx_vdev_t *vdev =
-		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
-
-	if (NULL == vdev) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "%s: Invalid vdev_id %d", __func__, vdev_id);
-		return -EINVAL;
-	}
-
-	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
-	vdev->ll_pause.max_q_depth = pause_q_depth;
-	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-
-	return 0;
-}
-#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
-
 /**
  * ol_txrx_display_stats() - Display OL TXRX display stats
  * @value: Module id for which stats needs to be displayed
@@ -5489,27 +5231,6 @@
 	return rc;
 }
 
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
-/**
- * ol_txrx_register_pause_cb() - register pause callback
- * @pause_cb: pause callback
- *
- * Return: QDF status
- */
-static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
-	tx_pause_callback pause_cb)
-{
-	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-
-	if (!pdev || !pause_cb) {
-		ol_txrx_err("pdev or pause_cb is NULL");
-		return QDF_STATUS_E_INVAL;
-	}
-	pdev->pause_cb = pause_cb;
-	return QDF_STATUS_SUCCESS;
-}
-#endif
-
 #ifdef RECEIVE_OFFLOAD
 /**
  * ol_txrx_offld_flush_handler() - offld flush handler
@@ -5759,35 +5480,6 @@
 		       qdf_nbuf_data(nbuf), len, true);
 }
 
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
-bool
-ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
-{
-	struct ol_tx_flow_pool_t *pool;
-	bool enough_desc_flag;
-
-	if (!vdev)
-		return false;
-
-	pool = vdev->pool;
-
-	if (!pool)
-		return false;
-
-	qdf_spin_lock_bh(&pool->flow_pool_lock);
-	enough_desc_flag = (pool->avail_desc < (pool->stop_th +
-				OL_TX_NON_FWD_RESERVE))
-		? false : true;
-	qdf_spin_unlock_bh(&pool->flow_pool_lock);
-	return enough_desc_flag;
-}
-#else
-bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
-{
-	return true;
-}
-#endif
-
 /**
  * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
  * @vdev_id: vdev_id
diff --git a/core/dp/txrx/ol_txrx.h b/core/dp/txrx/ol_txrx.h
index 609279d..090f029 100644
--- a/core/dp/txrx/ol_txrx.h
+++ b/core/dp/txrx/ol_txrx.h
@@ -100,6 +100,17 @@
 	return NULL;
 }
 #endif
+
+#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
+ol_txrx_peer_handle
+ol_txrx_peer_find_by_local_id(struct cdp_pdev *pdev,
+			      uint8_t local_peer_id);
+ol_txrx_peer_handle
+ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
+				 uint8_t local_peer_id,
+				 enum peer_debug_id_type dbg_id);
+#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
+
 /*
  * @nbuf: buffer which contains data to be displayed
  * @nbuf_paddr: physical address of the buffer
@@ -110,6 +121,43 @@
 void
 ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len);
 
+struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);
+
+void *ol_txrx_find_peer_by_addr(struct cdp_pdev *pdev,
+				uint8_t *peer_addr,
+				uint8_t *peer_id);
+
+void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *scn);
+void peer_unmap_timer_work_function(void *);
+void peer_unmap_timer_handler(void *data);
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
+				     ol_txrx_tx_flow_control_fp flow_control,
+				     void *osif_fc_ctx,
+				     ol_txrx_tx_flow_control_is_pause_fp
+				     flow_control_is_pause);
+
+int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id);
+
+bool ol_txrx_get_tx_resource(uint8_t sta_id,
+			     unsigned int low_watermark,
+			     unsigned int high_watermark_offset);
+
+int ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth);
+#endif
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);
+uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev);
+static inline
+uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
+{
+	return pdev->num_msdu_desc;
+}
+
+QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
+				     tx_pause_callback pause_cb);
 /**
  * ol_txrx_fwd_desc_thresh_check() - check to forward packet to tx path
  * @vdev: which virtual device the frames were addressed to
@@ -130,15 +178,36 @@
  *         false; not enough descriptors, drop the packet
  */
 bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev);
+#else
+/**
+ * ol_tx_get_desc_global_pool_size() - get global pool size
+ * @pdev: pdev handle
+ *
+ * Return: global pool size
+ */
+static inline
+uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
+{
+	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
+}
 
-struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);
+/**
+ * ol_tx_get_total_free_desc() - get total free descriptors
+ * @pdev: pdev handle
+ *
+ * Return: total free descriptors
+ */
+static inline
+uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
+{
+	return pdev->tx_desc.num_free;
+}
 
-void *ol_txrx_find_peer_by_addr(struct cdp_pdev *pdev,
-				uint8_t *peer_addr,
-				uint8_t *peer_id);
+static inline
+bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
+{
+	return true;
+}
 
-void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *scn);
-void peer_unmap_timer_work_function(void *);
-void peer_unmap_timer_handler(void *data);
-
+#endif
 #endif /* _OL_TXRX__H_ */
diff --git a/core/dp/txrx/ol_txrx_flow_control.c b/core/dp/txrx/ol_txrx_flow_control.c
index 307cc29..df687c2 100644
--- a/core/dp/txrx/ol_txrx_flow_control.c
+++ b/core/dp/txrx/ol_txrx_flow_control.c
@@ -83,6 +83,91 @@
 }
 #endif
 
+bool
+ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
+{
+	struct ol_tx_flow_pool_t *pool;
+	bool enough_desc_flag;
+
+	if (!vdev)
+		return false;
+
+	pool = vdev->pool;
+
+	if (!pool)
+		return false;
+
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	enough_desc_flag = (pool->avail_desc < (pool->stop_th +
+				OL_TX_NON_FWD_RESERVE))
+		? false : true;
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+	return enough_desc_flag;
+}
+
+/**
+ * ol_txrx_register_pause_cb() - register pause callback
+ * @pause_cb: pause callback
+ *
+ * Return: QDF status
+ */
+QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
+				     tx_pause_callback pause_cb)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
+	if (!pdev || !pause_cb) {
+		ol_txrx_err("pdev or pause_cb is NULL");
+		return QDF_STATUS_E_INVAL;
+	}
+	pdev->pause_cb = pause_cb;
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * ol_tx_set_desc_global_pool_size() - set global pool size
+ * @num_msdu_desc: total number of descriptors
+ *
+ * Return: none
+ */
+void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
+	if (!pdev) {
+		qdf_print("%s: pdev is NULL\n", __func__);
+		return;
+	}
+	pdev->num_msdu_desc = num_msdu_desc;
+	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
+		pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
+	ol_txrx_info_high("Global pool size: %d\n", pdev->num_msdu_desc);
+}
+
+/**
+ * ol_tx_get_total_free_desc() - get total free descriptors
+ * @pdev: pdev handle
+ *
+ * Return: total free descriptors
+ */
+uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_tx_flow_pool_t *pool = NULL;
+	uint32_t free_desc;
+
+	free_desc = pdev->tx_desc.num_free;
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
+		      flow_pool_list_elem) {
+		qdf_spin_lock_bh(&pool->flow_pool_lock);
+		free_desc += pool->avail_desc;
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+	}
+	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+	return free_desc;
+}
+
 /**
  * ol_tx_register_flow_control() - Register fw based tx flow control
  * @pdev: pdev handle
@@ -1142,3 +1227,114 @@
 	ol_tx_dec_pool_ref(pool, false);
 }
 #endif
+
+/**
+ * ol_txrx_map_to_netif_reason_type() - map to netif_reason_type
+ * @reason: network queue pause reason
+ *
+ * Return: netif_reason_type
+ */
+static enum netif_reason_type
+ol_txrx_map_to_netif_reason_type(uint32_t reason)
+{
+	switch (reason) {
+	case OL_TXQ_PAUSE_REASON_FW:
+		return WLAN_FW_PAUSE;
+	case OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED:
+		return WLAN_PEER_UNAUTHORISED;
+	case OL_TXQ_PAUSE_REASON_TX_ABORT:
+		return WLAN_TX_ABORT;
+	case OL_TXQ_PAUSE_REASON_VDEV_STOP:
+		return WLAN_VDEV_STOP;
+	case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
+		return WLAN_THERMAL_MITIGATION;
+	default:
+		ol_txrx_err(
+			   "%s: reason not supported %d\n",
+			   __func__, reason);
+		return WLAN_REASON_TYPE_MAX;
+	}
+}
+
+/**
+ * ol_txrx_vdev_pause() - pause vdev network queues
+ * @vdev: vdev handle
+ * @reason: network queue pause reason
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	enum netif_reason_type netif_reason;
+
+	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
+		ol_txrx_err("%s: invalid pdev\n", __func__);
+		return;
+	}
+
+	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
+	if (netif_reason == WLAN_REASON_TYPE_MAX)
+		return;
+
+	pdev->pause_cb(vdev->vdev_id, WLAN_STOP_ALL_NETIF_QUEUE, netif_reason);
+}
+
+/**
+ * ol_txrx_vdev_unpause() - unpause vdev network queues
+ * @vdev: vdev handle
+ * @reason: network queue pause reason
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	enum netif_reason_type netif_reason;
+
+	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
+		ol_txrx_err("%s: invalid pdev\n", __func__);
+		return;
+	}
+
+	netif_reason = ol_txrx_map_to_netif_reason_type(reason);
+	if (netif_reason == WLAN_REASON_TYPE_MAX)
+		return;
+
+	pdev->pause_cb(vdev->vdev_id, WLAN_WAKE_ALL_NETIF_QUEUE,
+			netif_reason);
+}
+
+/**
+ * ol_txrx_pdev_pause() - pause network queues for each vdev
+ * @pdev: pdev handle
+ * @reason: network queue pause reason
+ *
+ * Return: none
+ */
+void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
+{
+	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
+
+	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
+		ol_txrx_vdev_pause((struct cdp_vdev *)vdev, reason);
+	}
+}
+
+/**
+ * ol_txrx_pdev_unpause() - unpause network queues for each vdev
+ * @pdev: pdev handle
+ * @reason: network queue pause reason
+ *
+ * Return: none
+ */
+void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
+{
+	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
+
+	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
+		ol_txrx_vdev_unpause((struct cdp_vdev *)vdev, reason);
+	}
+}
diff --git a/core/dp/txrx/ol_txrx_legacy_flow_control.c b/core/dp/txrx/ol_txrx_legacy_flow_control.c
new file mode 100644
index 0000000..bcf28c2
--- /dev/null
+++ b/core/dp/txrx/ol_txrx_legacy_flow_control.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* OS abstraction libraries */
+#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
+#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
+#include <qdf_util.h>           /* qdf_unlikely */
+
+/* APIs for other modules */
+#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
+#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */
+
+/* internal header files relevant for all systems */
+#include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
+#include <ol_tx_desc.h>         /* ol_tx_desc */
+#include <ol_tx_send.h>         /* ol_tx_send */
+#include <ol_txrx.h>            /* ol_txrx_get_vdev_from_vdev_id */
+
+/* internal header files relevant only for HL systems */
+#include <ol_tx_queue.h>        /* ol_tx_enqueue */
+
+/* internal header files relevant only for specific systems (Pronto) */
+#include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
+#include <ol_tx.h>
+#include <ol_cfg.h>
+#include <cdp_txrx_handle.h>
+
+/**
+ * ol_txrx_vdev_pause- Suspend all tx data for the specified virtual device
+ *
+ * @data_vdev - the virtual device being paused
+ * @reason - the reason for which vdev queue is getting paused
+ *
+ * This function applies primarily to HL systems, but also
+ * applies to LL systems that use per-vdev tx queues for MCC or
+ * thermal throttling. As an example, this function could be
+ * used when a single-channel physical device supports multiple
+ * channels by jumping back and forth between the channels in a
+ * time-shared manner.  As the device is switched from channel A
+ * to channel B, the virtual devices that operate on channel A
+ * will be paused.
+ *
+ */
+void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+
+	/* TO DO: log the queue pause */
+	/* acquire the mutex lock, since we'll be modifying the queues */
+	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	vdev->ll_pause.paused_reason |= reason;
+	vdev->ll_pause.q_pause_cnt++;
+	vdev->ll_pause.is_q_paused = true;
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+
+	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+/**
+ * ol_txrx_vdev_unpause - Resume tx for the specified virtual device
+ *
+ * @data_vdev - the virtual device being unpaused
+ * @reason - the reason for which vdev queue is getting unpaused
+ *
+ * This function applies primarily to HL systems, but also applies to
+ * LL systems that use per-vdev tx queues for MCC or thermal throttling.
+ *
+ */
+void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+	/* TO DO: log the queue unpause */
+	/* acquire the mutex lock, since we'll be modifying the queues */
+	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	if (vdev->ll_pause.paused_reason & reason) {
+		vdev->ll_pause.paused_reason &= ~reason;
+		if (!vdev->ll_pause.paused_reason) {
+			vdev->ll_pause.is_q_paused = false;
+			vdev->ll_pause.q_unpause_cnt++;
+			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+			ol_tx_vdev_ll_pause_queue_send((void *)vdev);
+		} else {
+			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+		}
+	} else {
+		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+	}
+	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+/**
+ * ol_txrx_vdev_flush - Drop all tx data for the specified virtual device
+ *
+ * @data_vdev - the virtual device being flushed
+ *
+ *  This function applies primarily to HL systems, but also applies to
+ *  LL systems that use per-vdev tx queues for MCC or thermal throttling.
+ *  This function would typically be used by the ctrl SW after it parks
+ *  a STA vdev and then resumes it, but to a new AP.  In this case, though
+ *  the same vdev can be used, any old tx frames queued inside it would be
+ *  stale, and would need to be discarded.
+ *
+ */
+void ol_txrx_vdev_flush(struct cdp_vdev *pvdev)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	qdf_timer_stop(&vdev->ll_pause.timer);
+	vdev->ll_pause.is_q_timer_on = false;
+	while (vdev->ll_pause.txq.head) {
+		qdf_nbuf_t next =
+			qdf_nbuf_next(vdev->ll_pause.txq.head);
+		qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
+		if (QDF_NBUF_CB_PADDR(vdev->ll_pause.txq.head) &&
+		    !qdf_nbuf_ipa_owned_get(vdev->ll_pause.txq.head)) {
+			qdf_nbuf_unmap(vdev->pdev->osdev,
+				       vdev->ll_pause.txq.head,
+				       QDF_DMA_TO_DEVICE);
+		}
+		qdf_nbuf_tx_free(vdev->ll_pause.txq.head,
+				 QDF_NBUF_PKT_ERROR);
+		vdev->ll_pause.txq.head = next;
+	}
+	vdev->ll_pause.txq.tail = NULL;
+	vdev->ll_pause.txq.depth = 0;
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+}
+
+#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
+#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
+
+static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
+{
+	int max_to_accept;
+
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	if (vdev->ll_pause.paused_reason) {
+		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+		return;
+	}
+
+	/*
+	 * Send as much of the backlog as possible, but leave some margin
+	 * of unallocated tx descriptors that can be used for new frames
+	 * being transmitted by other vdevs.
+	 * Ideally there would be a scheduler, which would not only leave
+	 * some margin for new frames for other vdevs, but also would
+	 * fairly apportion the tx descriptors between multiple vdevs that
+	 * have backlogs in their pause queues.
+	 * However, the fairness benefit of having a scheduler for frames
+	 * from multiple vdev's pause queues is not sufficient to outweigh
+	 * the extra complexity.
+	 */
+	max_to_accept = vdev->pdev->tx_desc.num_free -
+		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
+	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
+		qdf_nbuf_t tx_msdu;
+
+		max_to_accept--;
+		vdev->ll_pause.txq.depth--;
+		tx_msdu = vdev->ll_pause.txq.head;
+		if (tx_msdu) {
+			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
+			if (!vdev->ll_pause.txq.head)
+				vdev->ll_pause.txq.tail = NULL;
+			qdf_nbuf_set_next(tx_msdu, NULL);
+			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
+						QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
+			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
+			/*
+			 * It is unexpected that ol_tx_ll would reject the frame
+			 * since we checked that there's room for it, though
+			 * there's an infinitesimal possibility that between the
+			 * time we checked the room available and now, a
+			 * concurrent batch of tx frames used up all the room.
+			 * For simplicity, just drop the frame.
+			 */
+			if (tx_msdu) {
+				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
+					       QDF_DMA_TO_DEVICE);
+				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
+			}
+		}
+	}
+	if (vdev->ll_pause.txq.depth) {
+		qdf_timer_stop(&vdev->ll_pause.timer);
+		if (!qdf_atomic_read(&vdev->delete.detaching)) {
+			qdf_timer_start(&vdev->ll_pause.timer,
+					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
+			vdev->ll_pause.is_q_timer_on = true;
+		}
+		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
+			vdev->ll_pause.q_overflow_cnt++;
+	}
+
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+}
+
+static qdf_nbuf_t
+ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
+			      qdf_nbuf_t msdu_list, uint8_t start_timer)
+{
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	while (msdu_list &&
+	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
+		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
+
+		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
+					     QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
+		DPTRACE(qdf_dp_trace(msdu_list,
+			QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
+			QDF_TRACE_DEFAULT_PDEV_ID,
+			qdf_nbuf_data_addr(msdu_list),
+			sizeof(qdf_nbuf_data(msdu_list)), QDF_TX));
+
+		vdev->ll_pause.txq.depth++;
+		if (!vdev->ll_pause.txq.head) {
+			vdev->ll_pause.txq.head = msdu_list;
+			vdev->ll_pause.txq.tail = msdu_list;
+		} else {
+			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
+		}
+		vdev->ll_pause.txq.tail = msdu_list;
+
+		msdu_list = next;
+	}
+	if (vdev->ll_pause.txq.tail)
+		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);
+
+	if (start_timer) {
+		qdf_timer_stop(&vdev->ll_pause.timer);
+		if (!qdf_atomic_read(&vdev->delete.detaching)) {
+			qdf_timer_start(&vdev->ll_pause.timer,
+					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
+			vdev->ll_pause.is_q_timer_on = true;
+		}
+	}
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+
+	return msdu_list;
+}
+
+/*
+ * Store up the tx frame in the vdev's tx queue if the vdev is paused.
+ * If there are too many frames in the tx queue, reject it.
+ */
+qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
+{
+	uint16_t eth_type;
+	uint32_t paused_reason;
+
+	if (!msdu_list)
+		return NULL;
+
+	paused_reason = vdev->ll_pause.paused_reason;
+	if (paused_reason) {
+		if (qdf_unlikely((paused_reason &
+				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
+				 paused_reason)) {
+			eth_type = (((struct ethernet_hdr_t *)
+				     qdf_nbuf_data(msdu_list))->
+				    ethertype[0] << 8) |
+				   (((struct ethernet_hdr_t *)
+				     qdf_nbuf_data(msdu_list))->ethertype[1]);
+			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
+				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
+				return msdu_list;
+			}
+		}
+		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
+	} else {
+		if (vdev->ll_pause.txq.depth > 0 ||
+		    vdev->pdev->tx_throttle.current_throttle_level !=
+		    THROTTLE_LEVEL_0) {
+			/*
+			 * not paused, but there is a backlog of frms
+			 * from a prior pause or throttle off phase
+			 */
+			msdu_list = ol_tx_vdev_pause_queue_append(
+				vdev, msdu_list, 0);
+			/*
+			 * if throttle is disabled or phase is "on",
+			 * send the frame
+			 */
+			if (vdev->pdev->tx_throttle.current_throttle_level ==
+			    THROTTLE_LEVEL_0 ||
+			    vdev->pdev->tx_throttle.current_throttle_phase ==
+			    THROTTLE_PHASE_ON) {
+				/*
+				 * send as many frames as possible
+				 * from the vdevs backlog
+				 */
+				ol_tx_vdev_ll_pause_queue_send_base(vdev);
+			}
+		} else {
+			/*
+			 * not paused, no throttle and no backlog -
+			 * send the new frames
+			 */
+			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
+		}
+	}
+	return msdu_list;
+}
+
+/*
+ * Run through the transmit queues for all the vdevs and
+ * send the pending frames
+ */
+void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
+{
+	int max_to_send;        /* tracks how many frames have been sent */
+	qdf_nbuf_t tx_msdu;
+	struct ol_txrx_vdev_t *vdev = NULL;
+	uint8_t more;
+
+	if (!pdev)
+		return;
+
+	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
+		return;
+
+	/* ensure that we send no more than tx_threshold frames at once */
+	max_to_send = pdev->tx_throttle.tx_threshold;
+
+	/* round robin through the vdev queues for the given pdev */
+
+	/*
+	 * Potential improvement: download several frames from the same vdev
+	 * at a time, since it is more likely that those frames could be
+	 * aggregated together, remember which vdev was serviced last,
+	 * so the next call this function can resume the round-robin
+	 * traversing where the current invocation left off
+	 */
+	do {
+		more = 0;
+		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
+			if (vdev->ll_pause.txq.depth) {
+				if (vdev->ll_pause.paused_reason) {
+					qdf_spin_unlock_bh(&vdev->ll_pause.
+							   mutex);
+					continue;
+				}
+
+				tx_msdu = vdev->ll_pause.txq.head;
+				if (!tx_msdu) {
+					qdf_spin_unlock_bh(&vdev->ll_pause.
+							   mutex);
+					continue;
+				}
+
+				max_to_send--;
+				vdev->ll_pause.txq.depth--;
+
+				vdev->ll_pause.txq.head =
+					qdf_nbuf_next(tx_msdu);
+
+				if (!vdev->ll_pause.txq.head)
+					vdev->ll_pause.txq.tail = NULL;
+
+				qdf_nbuf_set_next(tx_msdu, NULL);
+				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
+				/*
+				 * It is unexpected that ol_tx_ll would reject
+				 * the frame, since we checked that there's
+				 * room for it, though there's an infinitesimal
+				 * possibility that between the time we checked
+				 * the room available and now, a concurrent
+				 * batch of tx frames used up all the room.
+				 * For simplicity, just drop the frame.
+				 */
+				if (tx_msdu) {
+					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
+						       QDF_DMA_TO_DEVICE);
+					qdf_nbuf_tx_free(tx_msdu,
+							 QDF_NBUF_PKT_ERROR);
+				}
+			}
+			/*check if there are more msdus to transmit */
+			if (vdev->ll_pause.txq.depth)
+				more = 1;
+			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+		}
+	} while (more && max_to_send);
+
+	vdev = NULL;
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
+		if (vdev->ll_pause.txq.depth) {
+			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
+			qdf_timer_start(
+				&pdev->tx_throttle.tx_timer,
+				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
+			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+			return;
+		}
+		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+	}
+}
+
+void ol_tx_vdev_ll_pause_queue_send(void *context)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
+	if (pdev &&
+	    pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
+	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
+		return;
+	ol_tx_vdev_ll_pause_queue_send_base(vdev);
+}
+
+/**
+ * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
+ * @sta_id: sta_id
+ *
+ * Return: vdev handle
+ *            NULL if not found.
+ */
+static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
+{
+	struct ol_txrx_peer_t *peer = NULL;
+	ol_txrx_pdev_handle pdev = NULL;
+
+	if (sta_id >= WLAN_MAX_STA_COUNT) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "Invalid sta id passed");
+		return NULL;
+	}
+
+	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	if (!pdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "PDEV not found for sta_id [%d]", sta_id);
+		return NULL;
+	}
+
+	peer = ol_txrx_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
+
+	if (!peer) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+			  "PEER [%d] not found", sta_id);
+		return NULL;
+	}
+
+	return peer->vdev;
+}
+
+/**
+ * ol_txrx_register_tx_flow_control() - register tx flow control callback
+ * @vdev_id: vdev_id
+ * @flowControl: flow control callback
+ * @osif_fc_ctx: callback context
+ * @flow_control_is_pause: is vdev paused by flow control
+ *
+ * Return: 0 for success or error code
+ */
+int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
+				     ol_txrx_tx_flow_control_fp flowControl,
+				     void *osif_fc_ctx,
+				     ol_txrx_tx_flow_control_is_pause_fp
+				     flow_control_is_pause)
+{
+	struct ol_txrx_vdev_t *vdev =
+		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+	if (!vdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid vdev_id %d", __func__, vdev_id);
+		return -EINVAL;
+	}
+
+	qdf_spin_lock_bh(&vdev->flow_control_lock);
+	vdev->osif_flow_control_cb = flowControl;
+	vdev->osif_flow_control_is_pause = flow_control_is_pause;
+	vdev->osif_fc_ctx = osif_fc_ctx;
+	qdf_spin_unlock_bh(&vdev->flow_control_lock);
+	return 0;
+}
+
+/**
+ * ol_txrx_de_register_tx_flow_control_cb() - deregister tx flow control
+ *                                            callback
+ * @vdev_id: vdev_id
+ *
+ * Return: 0 for success or error code
+ */
+int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
+{
+	struct ol_txrx_vdev_t *vdev =
+		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+	if (!vdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid vdev_id", __func__);
+		return -EINVAL;
+	}
+
+	qdf_spin_lock_bh(&vdev->flow_control_lock);
+	vdev->osif_flow_control_cb = NULL;
+	vdev->osif_flow_control_is_pause = NULL;
+	vdev->osif_fc_ctx = NULL;
+	qdf_spin_unlock_bh(&vdev->flow_control_lock);
+	return 0;
+}
+
+/**
+ * ol_txrx_get_tx_resource() - if tx resource less than low_watermark
+ * @sta_id: sta id
+ * @low_watermark: low watermark
+ * @high_watermark_offset: high watermark offset value
+ *
+ * Return: true/false
+ */
+bool
+ol_txrx_get_tx_resource(uint8_t sta_id,
+			unsigned int low_watermark,
+			unsigned int high_watermark_offset)
+{
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
+
+	if (!vdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+			  "%s: Invalid sta_id %d", __func__, sta_id);
+		/* Return true so caller do not understand that resource
+		 * is less than low_watermark.
+		 * sta_id validation will be done in ol_tx_send_data_frame
+		 * and if sta_id is not registered then host will drop
+		 * packet.
+		 */
+		return true;
+	}
+
+	qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
+
+	if (vdev->pdev->tx_desc.num_free < (uint16_t)low_watermark) {
+		vdev->tx_fl_lwm = (uint16_t)low_watermark;
+		vdev->tx_fl_hwm =
+			(uint16_t)(low_watermark + high_watermark_offset);
+		/* Not enough free resource, stop TX OS Q */
+		qdf_atomic_set(&vdev->os_q_paused, 1);
+		qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
+		return false;
+	}
+	qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
+	return true;
+}
+
+/**
+ * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
+ * @vdev_id: vdev id
+ * @pause_q_depth: pause queue depth
+ *
+ * Return: 0 for success or error code
+ */
+int ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
+{
+	struct ol_txrx_vdev_t *vdev =
+		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+	if (!vdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid vdev_id %d", __func__, vdev_id);
+		return -EINVAL;
+	}
+
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	vdev->ll_pause.max_q_depth = pause_q_depth;
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+
+	return 0;
+}
+
+void ol_txrx_flow_control_cb(struct cdp_vdev *pvdev, bool tx_resume)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+
+	qdf_spin_lock_bh(&vdev->flow_control_lock);
+	if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
+		vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
+	qdf_spin_unlock_bh(&vdev->flow_control_lock);
+}
+
+/**
+ * ol_txrx_flow_control_is_pause() - is osif paused by flow control
+ * @vdev: vdev handle
+ *
+ * Return: true if osif is paused by flow control
+ */
+static bool ol_txrx_flow_control_is_pause(ol_txrx_vdev_handle vdev)
+{
+	bool is_pause = false;
+
+	if ((vdev->osif_flow_control_is_pause) && (vdev->osif_fc_ctx))
+		is_pause = vdev->osif_flow_control_is_pause(vdev->osif_fc_ctx);
+
+	return is_pause;
+}
+
+/**
+ * ol_tx_flow_ct_unpause_os_q() - Unpause OS Q
+ * @pdev: physical device object
+ *
+ *
+ * Return: None
+ */
+void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
+{
+	struct ol_txrx_vdev_t *vdev;
+
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		if ((qdf_atomic_read(&vdev->os_q_paused) &&
+		     (vdev->tx_fl_hwm != 0)) ||
+		     ol_txrx_flow_control_is_pause(vdev)) {
+			qdf_spin_lock(&pdev->tx_mutex);
+			if (pdev->tx_desc.num_free > vdev->tx_fl_hwm) {
+				qdf_atomic_set(&vdev->os_q_paused, 0);
+				qdf_spin_unlock(&pdev->tx_mutex);
+				ol_txrx_flow_control_cb((struct cdp_vdev *)vdev,
+							true);
+			} else {
+				qdf_spin_unlock(&pdev->tx_mutex);
+			}
+		}
+	}
+}
+