qcacld-3.0: Add support for TCP delayed ack in driver

qcacld-2.0 to qcacld-3.0 propagation

This change adds driver-level TCP delayed ack support
to improve TCP RX performance on third-party platforms
that do not support the kernel TCP delayed ack feature.

TCP delayed ack depends on a packet count and a timer
value; whichever threshold is reached first triggers
sending the held TCP ack.

This feature can be controlled through the following
ini values (see the example below):
gDriverDelAckTimerValue - timer value in ms
gDriverDelAckPktCount - max number of acks to replace
gDriverDelAckEnable - enable/disable the feature
gDriverDelAckHighThreshold - RX packet high threshold
gDriverDelAckLowThreshold - RX packet low threshold
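
For example, an ini configuration matching the defaults
introduced by this change (values are illustrative only):

    gDriverDelAckEnable=1
    gDriverDelAckTimerValue=3
    gDriverDelAckPktCount=20
    gDriverDelAckHighThreshold=300
    gDriverDelAckLowThreshold=100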

Change-Id: I8105bbb90965295b5a4aefeb00d344a90155974d
CRs-fixed: 2414224
diff --git a/Kbuild b/Kbuild
index 083ae69..012798b 100644
--- a/Kbuild
+++ b/Kbuild
@@ -2176,6 +2176,8 @@
 cppflags-y += -DCONFIG_ATH_PCIE_MAX_PERF=0 -DCONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD=0 -DCONFIG_DISABLE_CDC_MAX_PERF_WAR=0
 endif
 
+cppflags-$(CONFIG_QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK) += -DQCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+
 cppflags-$(CONFIG_WLAN_FEATURE_11W) += -DWLAN_FEATURE_11W
 
 cppflags-$(CONFIG_QCA_TXDESC_SANITY_CHECKS) += -DQCA_SUPPORT_TXDESC_SANITY_CHECKS
diff --git a/configs/genoa.common b/configs/genoa.common
index 2fbd469..aa7f0aa 100644
--- a/configs/genoa.common
+++ b/configs/genoa.common
@@ -209,7 +209,7 @@
 #Enable STATE MACHINE HISTORY
 CONFIG_SM_ENG_HIST := n
 
-ifeq (y,$(findstring y,$(CONFIG_ARCH_MSM) $(CONFIG_ARCH_QCOM)))
+ifeq (y,$(findstring y,$(CONFIG_ARCH_MSM) $(CONFIG_ARCH_QCOM) $(CONFIG_QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK)))
 CONFIG_WLAN_FEATURE_DP_BUS_BANDWIDTH := y
 endif
 
diff --git a/core/cds/src/cds_api.c b/core/cds/src/cds_api.c
index b5955e3..1757e74 100644
--- a/core/cds/src/cds_api.c
+++ b/core/cds/src/cds_api.c
@@ -329,6 +329,25 @@
 {}
 #endif
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+static inline void
+cds_cdp_update_del_ack_params(struct wlan_objmgr_psoc *psoc,
+			      struct txrx_pdev_cfg_param_t *cdp_cfg)
+{
+	cdp_cfg->del_ack_enable =
+		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_ENABLE);
+	cdp_cfg->del_ack_pkt_count =
+		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_PKT_CNT);
+	cdp_cfg->del_ack_timer_value =
+		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_TIMER_VALUE);
+}
+#else
+static inline void
+cds_cdp_update_del_ack_params(struct wlan_objmgr_psoc *psoc,
+			      struct txrx_pdev_cfg_param_t *cdp_cfg)
+{}
+#endif
+
 /**
  * cds_cdp_cfg_attach() - attach data path config module
  * @cds_cfg: generic platform level config instance
@@ -366,6 +385,8 @@
 	cdp_cfg.disable_intra_bss_fwd =
 		cfg_get(psoc, CFG_DP_AP_STA_SECURITY_SEPERATION);
 
+	cds_cdp_update_del_ack_params(psoc, &cdp_cfg);
+
 	gp_cds_context->cfg_ctx = cdp_cfg_attach(soc, gp_cds_context->qdf_ctx,
 					(void *)(&cdp_cfg));
 	if (!gp_cds_context->cfg_ctx) {
diff --git a/core/dp/ol/inc/ol_cfg.h b/core/dp/ol/inc/ol_cfg.h
index 328cc69..8da541e 100644
--- a/core/dp/ol/inc/ol_cfg.h
+++ b/core/dp/ol/inc/ol_cfg.h
@@ -119,6 +119,15 @@
 	uint32_t uc_tx_partition_base;
 	/* Flag to indicate whether new htt format is supported */
 	bool new_htt_format_enabled;
+
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+	/* enable the tcp delayed ack feature in the driver */
+	bool  del_ack_enable;
+	/* timeout if no more tcp ack frames, unit is ms */
+	uint16_t del_ack_timer_value;
+	/* the maximum number of replaced tcp ack frames */
+	uint16_t del_ack_pkt_count;
+#endif
 };
 
 /**
@@ -721,6 +730,55 @@
 	return cfg->new_htt_format_enabled;
 }
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+/**
+ * ol_cfg_get_del_ack_timer_value() - get delayed ack timer value
+ * @cfg_pdev: pdev handle
+ *
+ * Return: timer value
+ */
+int ol_cfg_get_del_ack_timer_value(struct cdp_cfg *cfg_pdev);
+
+/**
+ * ol_cfg_get_del_ack_enable_value() - get delayed ack enable value
+ * @cfg_pdev: pdev handle
+ *
+ * Return: enable/disable
+ */
+bool ol_cfg_get_del_ack_enable_value(struct cdp_cfg *cfg_pdev);
+
+/**
+ * ol_cfg_get_del_ack_count_value() - get delayed ack count value
+ * @cfg_pdev: pdev handle
+ *
+ * Return: count value
+ */
+int ol_cfg_get_del_ack_count_value(struct cdp_cfg *cfg_pdev);
+
+/**
+ * ol_cfg_update_del_ack_params() - update delayed ack params
+ * @cfg_ctx: cfg context
+ * @cfg_param: parameters
+ *
+ * Return: none
+ */
+void ol_cfg_update_del_ack_params(struct txrx_pdev_cfg_t *cfg_ctx,
+				  struct txrx_pdev_cfg_param_t *cfg_param);
+#else
+/**
+ * ol_cfg_update_del_ack_params() - update delayed ack params
+ * @cfg_ctx: cfg context
+ * @cfg_param: parameters
+ *
+ * Return: none
+ */
+static inline
+void ol_cfg_update_del_ack_params(struct txrx_pdev_cfg_t *cfg_ctx,
+				  struct txrx_pdev_cfg_param_t *cfg_param)
+{
+}
+#endif
+
 /**
  * ol_cfg_get_wrr_skip_weight() - brief Query for the param of wrr_skip_weight
  * @pdev: handle to the physical device.
diff --git a/core/dp/txrx/ol_cfg.c b/core/dp/txrx/ol_cfg.c
index db6ace7..205186e 100644
--- a/core/dp/txrx/ol_cfg.c
+++ b/core/dp/txrx/ol_cfg.c
@@ -119,6 +119,23 @@
 }
 #endif
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+/**
+ * ol_cfg_update_del_ack_params() - update delayed ack params
+ * @cfg_ctx: cfg context
+ * @cfg_param: parameters
+ *
+ * Return: none
+ */
+void ol_cfg_update_del_ack_params(struct txrx_pdev_cfg_t *cfg_ctx,
+				  struct txrx_pdev_cfg_param_t *cfg_param)
+{
+	cfg_ctx->del_ack_enable = cfg_param->del_ack_enable;
+	cfg_ctx->del_ack_timer_value = cfg_param->del_ack_timer_value;
+	cfg_ctx->del_ack_pkt_count = cfg_param->del_ack_pkt_count;
+}
+#endif
+
 /* FIX THIS -
  * For now, all these configuration parameters are hardcoded.
  * Many of these should actually be determined dynamically instead.
@@ -176,6 +193,8 @@
 	cfg_ctx->enable_flow_steering = cfg_param->enable_flow_steering;
 	cfg_ctx->disable_intra_bss_fwd = cfg_param->disable_intra_bss_fwd;
 
+	ol_cfg_update_del_ack_params(cfg_ctx, cfg_param);
+
 	ol_tx_set_flow_control_parameters((struct cdp_cfg *)cfg_ctx, cfg_param);
 
 	for (i = 0; i < QCA_WLAN_AC_ALL; i++) {
@@ -194,6 +213,47 @@
 	return (struct cdp_cfg *)cfg_ctx;
 }
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+/**
+ * ol_cfg_get_del_ack_timer_value() - get delayed ack timer value
+ * @cfg_pdev: pdev handle
+ *
+ * Return: timer value
+ */
+int ol_cfg_get_del_ack_timer_value(struct cdp_cfg *cfg_pdev)
+{
+	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
+	return cfg->del_ack_timer_value;
+}
+
+/**
+ * ol_cfg_get_del_ack_enable_value() - get delayed ack enable value
+ * @cfg_pdev: pdev handle
+ *
+ * Return: enable/disable
+ */
+bool ol_cfg_get_del_ack_enable_value(struct cdp_cfg *cfg_pdev)
+{
+	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
+	return cfg->del_ack_enable;
+}
+
+/**
+ * ol_cfg_get_del_ack_count_value() - get delayed ack count value
+ * @cfg_pdev: pdev handle
+ *
+ * Return: count value
+ */
+int ol_cfg_get_del_ack_count_value(struct cdp_cfg *cfg_pdev)
+{
+	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
+	return cfg->del_ack_pkt_count;
+}
+#endif
+
 int ol_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
diff --git a/core/dp/txrx/ol_tx_hl.c b/core/dp/txrx/ol_tx_hl.c
index 16c68e9..998681c 100644
--- a/core/dp/txrx/ol_tx_hl.c
+++ b/core/dp/txrx/ol_tx_hl.c
@@ -54,6 +54,7 @@
 #include <pktlog_ac_fmt.h>
 #include <cdp_txrx_handle.h>
 #include <wlan_reg_services_api.h>
+#include "qdf_hrtimer.h"
 
 #ifdef QCA_HL_NETDEV_FLOW_CONTROL
 static u16 ol_txrx_tx_desc_alloc_table[TXRX_FC_MAX] = {
@@ -492,6 +493,7 @@
  * @tx_spec: indicate what non-standard transmission actions to apply
  * @msdu_list: the tx frames to send
  * @tx_comp_req: tx completion req
+ * @call_sched: will schedule the tx if true
  *
  * Return: NULL if all MSDUs are accepted
  */
@@ -500,7 +502,8 @@
 	ol_txrx_vdev_handle vdev,
 	enum ol_tx_spec tx_spec,
 	qdf_nbuf_t msdu_list,
-	int tx_comp_req)
+	int tx_comp_req,
+	bool call_sched)
 {
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	qdf_nbuf_t msdu = msdu_list;
@@ -697,10 +700,734 @@
 MSDU_LOOP_BOTTOM:
 			msdu = next;
 		}
-		ol_tx_sched(pdev);
+
+		if (call_sched)
+			ol_tx_sched(pdev);
 		return NULL; /* all MSDUs were accepted */
 }
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+
+/**
+ * ol_tx_pdev_reset_driver_del_ack() - reset driver delayed ack enabled flag
+ * @ppdev: the data physical device
+ *
+ * Return: none
+ */
+void
+ol_tx_pdev_reset_driver_del_ack(struct cdp_pdev *ppdev)
+{
+	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+	struct ol_txrx_vdev_t *vdev;
+
+	if (!pdev)
+		return;
+
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		vdev->driver_del_ack_enabled = false;
+
+		dp_debug("vdev_id %d driver_del_ack_enabled %d",
+			 vdev->vdev_id, vdev->driver_del_ack_enabled);
+	}
+}
+
+/**
+ * ol_tx_vdev_set_driver_del_ack_enable() - set driver delayed ack enabled flag
+ * @vdev_id: vdev id
+ * @rx_packets: number of rx packets
+ * @time_in_ms: time in ms
+ * @high_th: high threshold
+ * @low_th: low threshold
+ *
+ * Return: none
+ */
+void
+ol_tx_vdev_set_driver_del_ack_enable(uint8_t vdev_id,
+				     unsigned long rx_packets,
+				     uint32_t time_in_ms,
+				     uint32_t high_th,
+				     uint32_t low_th)
+{
+	struct ol_txrx_vdev_t *vdev =
+			(struct ol_txrx_vdev_t *)
+			ol_txrx_get_vdev_from_vdev_id(vdev_id);
+	bool old_driver_del_ack_enabled;
+
+	if ((!vdev) || (low_th > high_th))
+		return;
+
+	old_driver_del_ack_enabled = vdev->driver_del_ack_enabled;
+	if (rx_packets > high_th)
+		vdev->driver_del_ack_enabled = true;
+	else if (rx_packets < low_th)
+		vdev->driver_del_ack_enabled = false;
+
+	if (old_driver_del_ack_enabled != vdev->driver_del_ack_enabled) {
+		dp_debug("vdev_id %d driver_del_ack_enabled %d rx_packets %ld time_in_ms %d high_th %d low_th %d",
+			 vdev->vdev_id, vdev->driver_del_ack_enabled,
+			 rx_packets, time_in_ms, high_th, low_th);
+	}
+}
+
+/**
+ * ol_tx_hl_send_all_tcp_ack() - send all queued tcp ack packets
+ * @vdev: vdev handle
+ *
+ * Return: none
+ */
+void ol_tx_hl_send_all_tcp_ack(struct ol_txrx_vdev_t *vdev)
+{
+	int i;
+	struct tcp_stream_node *tcp_node_list;
+	struct tcp_stream_node *temp;
+
+	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
+		tcp_node_list = NULL;
+		qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
+		if (vdev->tcp_ack_hash.node[i].no_of_entries)
+			tcp_node_list = vdev->tcp_ack_hash.node[i].head;
+
+		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
+		vdev->tcp_ack_hash.node[i].head = NULL;
+		qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
+
+		/* Send all packets */
+		while (tcp_node_list) {
+			int tx_comp_req = vdev->pdev->cfg.default_tx_comp_req;
+			qdf_nbuf_t msdu_list;
+
+			temp = tcp_node_list;
+			tcp_node_list = temp->next;
+
+			msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
+						  temp->head,
+						  tx_comp_req, false);
+			if (msdu_list)
+				qdf_nbuf_tx_free(msdu_list, 1/*error*/);
+			ol_txrx_vdev_free_tcp_node(vdev, temp);
+		}
+	}
+	ol_tx_sched(vdev->pdev);
+}
+
+/**
+ * tcp_del_ack_tasklet() - tasklet function to send ack packets
+ * @data: vdev handle
+ *
+ * Return: none
+ */
+void tcp_del_ack_tasklet(void *data)
+{
+	struct ol_txrx_vdev_t *vdev = data;
+
+	ol_tx_hl_send_all_tcp_ack(vdev);
+}
+
+/**
+ * ol_tx_get_stream_id() - get stream_id from packet info
+ * @info: packet info
+ *
+ * Return: stream_id
+ */
+uint16_t ol_tx_get_stream_id(struct packet_info *info)
+{
+	return ((info->dst_port + info->dst_ip + info->src_port + info->src_ip)
+					 & (OL_TX_HL_DEL_ACK_HASH_SIZE - 1));
+}
+
+/**
+ * ol_tx_is_tcp_ack() - check whether the packet is tcp ack frame
+ * @msdu: packet
+ *
+ * Return: true if the packet is tcp ack frame
+ */
+static bool
+ol_tx_is_tcp_ack(qdf_nbuf_t msdu)
+{
+	uint16_t ether_type;
+	uint8_t  protocol;
+	uint8_t  flag, ip_header_len, tcp_header_len;
+	uint32_t seg_len;
+	uint8_t  *skb_data;
+	uint32_t skb_len;
+	bool tcp_acked = false;
+	uint32_t tcp_header_off;
+
+	qdf_nbuf_peek_header(msdu, &skb_data, &skb_len);
+	if (skb_len < (QDF_NBUF_TRAC_IPV4_OFFSET +
+	    QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
+	    QDF_NBUF_TRAC_TCP_FLAGS_OFFSET))
+		goto exit;
+
+	ether_type = (uint16_t)(*(uint16_t *)
+			(skb_data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
+	protocol = (uint8_t)(*(uint8_t *)
+			(skb_data + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
+
+	if ((QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE) == ether_type) &&
+	    (protocol == QDF_NBUF_TRAC_TCP_TYPE)) {
+		ip_header_len = ((uint8_t)(*(uint8_t *)
+				(skb_data + QDF_NBUF_TRAC_IPV4_OFFSET)) &
+				QDF_NBUF_TRAC_IPV4_HEADER_MASK) << 2;
+		tcp_header_off = QDF_NBUF_TRAC_IPV4_OFFSET + ip_header_len;
+
+		tcp_header_len = ((uint8_t)(*(uint8_t *)
+			(skb_data + tcp_header_off +
+			QDF_NBUF_TRAC_TCP_HEADER_LEN_OFFSET))) >> 2;
+		seg_len = skb_len - tcp_header_len - tcp_header_off;
+		flag = (uint8_t)(*(uint8_t *)
+			(skb_data + tcp_header_off +
+			QDF_NBUF_TRAC_TCP_FLAGS_OFFSET));
+
+		if ((flag == QDF_NBUF_TRAC_TCP_ACK_MASK) && (seg_len == 0))
+			tcp_acked = true;
+	}
+
+exit:
+
+	return tcp_acked;
+}
+
+/**
+ * ol_tx_get_packet_info() - update packet info for passed msdu
+ * @msdu: packet
+ * @info: packet info
+ *
+ * Return: none
+ */
+void ol_tx_get_packet_info(qdf_nbuf_t msdu, struct packet_info *info)
+{
+	uint16_t ether_type;
+	uint8_t  protocol;
+	uint8_t  flag, ip_header_len, tcp_header_len;
+	uint32_t seg_len;
+	uint8_t  *skb_data;
+	uint32_t skb_len;
+	uint32_t tcp_header_off;
+
+	info->type = NO_TCP_PKT;
+
+	qdf_nbuf_peek_header(msdu, &skb_data, &skb_len);
+	if (skb_len < (QDF_NBUF_TRAC_IPV4_OFFSET +
+	    QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
+	    QDF_NBUF_TRAC_TCP_FLAGS_OFFSET))
+		return;
+
+	ether_type = (uint16_t)(*(uint16_t *)
+			(skb_data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
+	protocol = (uint8_t)(*(uint8_t *)
+			(skb_data + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
+
+	if ((QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE) == ether_type) &&
+	    (protocol == QDF_NBUF_TRAC_TCP_TYPE)) {
+		ip_header_len = ((uint8_t)(*(uint8_t *)
+				(skb_data + QDF_NBUF_TRAC_IPV4_OFFSET)) &
+				QDF_NBUF_TRAC_IPV4_HEADER_MASK) << 2;
+		tcp_header_off = QDF_NBUF_TRAC_IPV4_OFFSET + ip_header_len;
+
+		tcp_header_len = ((uint8_t)(*(uint8_t *)
+			(skb_data + tcp_header_off +
+			QDF_NBUF_TRAC_TCP_HEADER_LEN_OFFSET))) >> 2;
+		seg_len = skb_len - tcp_header_len - tcp_header_off;
+		flag = (uint8_t)(*(uint8_t *)
+			(skb_data + tcp_header_off +
+			QDF_NBUF_TRAC_TCP_FLAGS_OFFSET));
+
+		info->src_ip = QDF_SWAP_U32((uint32_t)(*(uint32_t *)
+			(skb_data + QDF_NBUF_TRAC_IPV4_SRC_ADDR_OFFSET)));
+		info->dst_ip = QDF_SWAP_U32((uint32_t)(*(uint32_t *)
+			(skb_data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET)));
+		info->src_port = QDF_SWAP_U16((uint16_t)(*(uint16_t *)
+				(skb_data + tcp_header_off +
+				QDF_NBUF_TRAC_TCP_SPORT_OFFSET)));
+		info->dst_port = QDF_SWAP_U16((uint16_t)(*(uint16_t *)
+				(skb_data + tcp_header_off +
+				QDF_NBUF_TRAC_TCP_DPORT_OFFSET)));
+		info->stream_id = ol_tx_get_stream_id(info);
+
+		if ((flag == QDF_NBUF_TRAC_TCP_ACK_MASK) && (seg_len == 0)) {
+			info->type = TCP_PKT_ACK;
+			info->ack_number = (uint32_t)(*(uint32_t *)
+				(skb_data + tcp_header_off +
+				QDF_NBUF_TRAC_TCP_ACK_OFFSET));
+			info->ack_number = QDF_SWAP_U32(info->ack_number);
+		} else {
+			info->type = TCP_PKT_NO_ACK;
+		}
+	}
+}
+
+/**
+ * ol_tx_hl_find_and_send_tcp_stream() - find and send tcp stream for passed
+ *                                       stream info
+ * @vdev: vdev handle
+ * @info: packet info
+ *
+ * Return: none
+ */
+void ol_tx_hl_find_and_send_tcp_stream(struct ol_txrx_vdev_t *vdev,
+				       struct packet_info *info)
+{
+	uint8_t no_of_entries;
+	struct tcp_stream_node *node_to_be_remove = NULL;
+
+	/* remove tcp node from hash */
+	qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
+			hash_node_lock);
+
+	no_of_entries = vdev->tcp_ack_hash.node[info->stream_id].
+			no_of_entries;
+	if (no_of_entries > 1) {
+		/* collision case */
+		struct tcp_stream_node *head =
+			vdev->tcp_ack_hash.node[info->stream_id].head;
+		struct tcp_stream_node *temp;
+
+		if ((head->dst_ip == info->dst_ip) &&
+		    (head->src_ip == info->src_ip) &&
+		    (head->src_port == info->src_port) &&
+		    (head->dst_port == info->dst_port)) {
+			node_to_be_remove = head;
+			vdev->tcp_ack_hash.node[info->stream_id].head =
+				head->next;
+			vdev->tcp_ack_hash.node[info->stream_id].
+				no_of_entries--;
+		} else {
+			temp = head;
+			while (temp->next) {
+				if ((temp->next->dst_ip == info->dst_ip) &&
+				    (temp->next->src_ip == info->src_ip) &&
+				    (temp->next->src_port == info->src_port) &&
+				    (temp->next->dst_port == info->dst_port)) {
+					node_to_be_remove = temp->next;
+					temp->next = temp->next->next;
+					vdev->tcp_ack_hash.
+						node[info->stream_id].
+						no_of_entries--;
+					break;
+				}
+				temp = temp->next;
+			}
+		}
+	} else if (no_of_entries == 1) {
+		/* Only one tcp_node */
+		node_to_be_remove =
+			 vdev->tcp_ack_hash.node[info->stream_id].head;
+		vdev->tcp_ack_hash.node[info->stream_id].head = NULL;
+		vdev->tcp_ack_hash.node[info->stream_id].no_of_entries = 0;
+	}
+	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.
+			  node[info->stream_id].hash_node_lock);
+
+	/* send packets */
+	if (node_to_be_remove) {
+		int tx_comp_req = vdev->pdev->cfg.default_tx_comp_req;
+		qdf_nbuf_t msdu_list;
+
+		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
+					  node_to_be_remove->head,
+					  tx_comp_req, true);
+		if (msdu_list)
+			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
+		ol_txrx_vdev_free_tcp_node(vdev, node_to_be_remove);
+	}
+}
+
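+/**
+ * ol_tx_hl_rep_tcp_ack() - replace the held tcp ack for a matching stream
+ * @vdev: vdev handle
+ * @msdu: new tcp ack packet
+ * @info: packet info of @msdu
+ * @is_found: set to true if a node for this stream already exists
+ * @start_timer: set to true if the delayed ack timer should be started
+ *
+ * Caller must hold the hash_node_lock for @info->stream_id.
+ *
+ * Return: stream node whose held packets must be sent now, or NULL
+ */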
+static struct tcp_stream_node *
+ol_tx_hl_rep_tcp_ack(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu,
+		     struct packet_info *info, bool *is_found,
+		     bool *start_timer)
+{
+	struct tcp_stream_node *node_to_be_remove = NULL;
+	struct tcp_stream_node *head =
+		 vdev->tcp_ack_hash.node[info->stream_id].head;
+	struct tcp_stream_node *temp;
+
+	if ((head->dst_ip == info->dst_ip) &&
+	    (head->src_ip == info->src_ip) &&
+	    (head->src_port == info->src_port) &&
+	    (head->dst_port == info->dst_port)) {
+		*is_found = true;
+		if ((head->ack_number < info->ack_number) &&
+		    (head->no_of_ack_replaced <
+		    ol_cfg_get_del_ack_count_value(vdev->pdev->ctrl_pdev))) {
+			/* replace ack packet */
+			qdf_nbuf_tx_free(head->head, 1);
+			head->head = msdu;
+			head->ack_number = info->ack_number;
+			head->no_of_ack_replaced++;
+			*start_timer = true;
+
+			vdev->no_of_tcpack_replaced++;
+
+			if (head->no_of_ack_replaced ==
+			    ol_cfg_get_del_ack_count_value(
+			    vdev->pdev->ctrl_pdev)) {
+				node_to_be_remove = head;
+				vdev->tcp_ack_hash.node[info->stream_id].head =
+					head->next;
+				vdev->tcp_ack_hash.node[info->stream_id].
+					no_of_entries--;
+			}
+		} else {
+			/* append and send packets */
+			head->head->next = msdu;
+			node_to_be_remove = head;
+			vdev->tcp_ack_hash.node[info->stream_id].head =
+				head->next;
+			vdev->tcp_ack_hash.node[info->stream_id].
+				no_of_entries--;
+		}
+	} else {
+		temp = head;
+		while (temp->next) {
+			if ((temp->next->dst_ip == info->dst_ip) &&
+			    (temp->next->src_ip == info->src_ip) &&
+			    (temp->next->src_port == info->src_port) &&
+			    (temp->next->dst_port == info->dst_port)) {
+				*is_found = true;
+				if ((temp->next->ack_number <
+					info->ack_number) &&
+				    (temp->next->no_of_ack_replaced <
+					 ol_cfg_get_del_ack_count_value(
+					 vdev->pdev->ctrl_pdev))) {
+					/* replace ack packet */
+					qdf_nbuf_tx_free(temp->next->head, 1);
+					temp->next->head  = msdu;
+					temp->next->ack_number =
+						info->ack_number;
+					temp->next->no_of_ack_replaced++;
+					*start_timer = true;
+
+					vdev->no_of_tcpack_replaced++;
+
+					if (temp->next->no_of_ack_replaced ==
+					   ol_cfg_get_del_ack_count_value(
+					   vdev->pdev->ctrl_pdev)) {
+						node_to_be_remove = temp->next;
+						temp->next = temp->next->next;
+						vdev->tcp_ack_hash.
+							node[info->stream_id].
+							no_of_entries--;
+					}
+				} else {
+					/* append and send packets */
+					temp->next->head->next = msdu;
+					node_to_be_remove = temp->next;
+					temp->next = temp->next->next;
+					vdev->tcp_ack_hash.
+						node[info->stream_id].
+						no_of_entries--;
+				}
+				break;
+			}
+			temp = temp->next;
+		}
+	}
+	return node_to_be_remove;
+}
+
+/**
+ * ol_tx_hl_find_and_replace_tcp_ack() - find and replace tcp ack packet for
+ *                                       passed packet info
+ * @vdev: vdev handle
+ * @msdu: packet
+ * @info: packet info
+ *
+ * Return: none
+ */
+void ol_tx_hl_find_and_replace_tcp_ack(struct ol_txrx_vdev_t *vdev,
+				       qdf_nbuf_t msdu,
+				       struct packet_info *info)
+{
+	uint8_t no_of_entries;
+	struct tcp_stream_node *node_to_be_remove = NULL;
+	bool is_found = false, start_timer = false;
+
+	/* replace ack if required or send packets */
+	qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
+			hash_node_lock);
+
+	no_of_entries = vdev->tcp_ack_hash.node[info->stream_id].no_of_entries;
+	if (no_of_entries > 0) {
+		node_to_be_remove = ol_tx_hl_rep_tcp_ack(vdev, msdu, info,
+							 &is_found,
+							 &start_timer);
+	}
+
+	if (no_of_entries == 0 || !is_found) {
+		/* Alloc new tcp node */
+		struct tcp_stream_node *new_node;
+
+		new_node = ol_txrx_vdev_alloc_tcp_node(vdev);
+		if (!new_node) {
+			qdf_spin_unlock_bh(&vdev->tcp_ack_hash.
+					  node[info->stream_id].hash_node_lock);
+			dp_alert("Malloc failed");
+			return;
+		}
+		new_node->stream_id = info->stream_id;
+		new_node->dst_ip = info->dst_ip;
+		new_node->src_ip = info->src_ip;
+		new_node->dst_port = info->dst_port;
+		new_node->src_port = info->src_port;
+		new_node->ack_number = info->ack_number;
+		new_node->head = msdu;
+		new_node->next = NULL;
+		new_node->no_of_ack_replaced = 0;
+
+		start_timer = true;
+		/* insert new_node */
+		if (!vdev->tcp_ack_hash.node[info->stream_id].head) {
+			vdev->tcp_ack_hash.node[info->stream_id].head =
+				new_node;
+			vdev->tcp_ack_hash.node[info->stream_id].
+				no_of_entries = 1;
+		} else {
+			struct tcp_stream_node *temp =
+				 vdev->tcp_ack_hash.node[info->stream_id].head;
+			while (temp->next)
+				temp = temp->next;
+
+			temp->next = new_node;
+			vdev->tcp_ack_hash.node[info->stream_id].
+				no_of_entries++;
+		}
+	}
+	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
+			  hash_node_lock);
+
+	/* start timer */
+	if (start_timer &&
+	    (!qdf_atomic_read(&vdev->tcp_ack_hash.is_timer_running))) {
+		qdf_hrtimer_start(&vdev->tcp_ack_hash.timer,
+				  qdf_ns_to_ktime((
+						ol_cfg_get_del_ack_timer_value(
+						vdev->pdev->ctrl_pdev) *
+						1000000)),
+			__QDF_HRTIMER_MODE_REL);
+		qdf_atomic_set(&vdev->tcp_ack_hash.is_timer_running, 1);
+	}
+
+	/* send packets */
+	if (node_to_be_remove) {
+		int tx_comp_req = vdev->pdev->cfg.default_tx_comp_req;
+		qdf_nbuf_t msdu_list = NULL;
+
+		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
+					  node_to_be_remove->head,
+					  tx_comp_req, true);
+		if (msdu_list)
+			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
+		ol_txrx_vdev_free_tcp_node(vdev, node_to_be_remove);
+	}
+}
+
+/**
+ * ol_tx_hl_vdev_tcp_del_ack_timer() - delayed ack timer function
+ * @timer: timer handle
+ *
+ * Return: enum qdf_hrtimer_restart_status
+ */
+enum qdf_hrtimer_restart_status
+ol_tx_hl_vdev_tcp_del_ack_timer(qdf_hrtimer_data_t *timer)
+{
+	struct ol_txrx_vdev_t *vdev = qdf_container_of(timer,
+						       struct ol_txrx_vdev_t,
+						       tcp_ack_hash.timer);
+	enum qdf_hrtimer_restart_status ret = __QDF_HRTIMER_NORESTART;
+
+	qdf_sched_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq);
+	qdf_atomic_set(&vdev->tcp_ack_hash.is_timer_running, 0);
+	return ret;
+}
+
+/**
+ * ol_tx_hl_del_ack_queue_flush_all() - drop all queued packets
+ * @vdev: vdev handle
+ *
+ * Return: none
+ */
+void ol_tx_hl_del_ack_queue_flush_all(struct ol_txrx_vdev_t *vdev)
+{
+	int i;
+	struct tcp_stream_node *tcp_node_list;
+	struct tcp_stream_node *temp;
+
+	qdf_hrtimer_cancel(&vdev->tcp_ack_hash.timer);
+	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
+		tcp_node_list = NULL;
+		qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
+
+		if (vdev->tcp_ack_hash.node[i].no_of_entries)
+			tcp_node_list = vdev->tcp_ack_hash.node[i].head;
+
+		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
+		vdev->tcp_ack_hash.node[i].head = NULL;
+		qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
+
+		/* free all packets */
+		while (tcp_node_list) {
+			temp = tcp_node_list;
+			tcp_node_list = temp->next;
+
+			qdf_nbuf_tx_free(temp->head, 1/*error*/);
+			ol_txrx_vdev_free_tcp_node(vdev, temp);
+		}
+	}
+	ol_txrx_vdev_deinit_tcp_del_ack(vdev);
+}
+
+/**
+ * ol_txrx_vdev_init_tcp_del_ack() - initialize tcp delayed ack structure
+ * @vdev: vdev handle
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_init_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
+{
+	int i;
+
+	vdev->driver_del_ack_enabled = false;
+
+	dp_debug("vdev-id=%u, driver_del_ack_enabled=%d",
+		 vdev->vdev_id,
+		 vdev->driver_del_ack_enabled);
+
+	vdev->no_of_tcpack = 0;
+	vdev->no_of_tcpack_replaced = 0;
+
+	qdf_hrtimer_init(&vdev->tcp_ack_hash.timer,
+			 ol_tx_hl_vdev_tcp_del_ack_timer,
+			 __QDF_CLOCK_MONOTONIC,
+			 __QDF_HRTIMER_MODE_REL,
+			 QDF_CONTEXT_HARDWARE
+			 );
+	qdf_create_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq,
+		      tcp_del_ack_tasklet,
+		      vdev);
+	qdf_atomic_init(&vdev->tcp_ack_hash.is_timer_running);
+	qdf_atomic_init(&vdev->tcp_ack_hash.tcp_node_in_use_count);
+	qdf_spinlock_create(&vdev->tcp_ack_hash.tcp_free_list_lock);
+	vdev->tcp_ack_hash.tcp_free_list = NULL;
+	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
+		qdf_spinlock_create(&vdev->tcp_ack_hash.node[i].hash_node_lock);
+		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
+		vdev->tcp_ack_hash.node[i].head = NULL;
+	}
+}
+
+/**
+ * ol_txrx_vdev_deinit_tcp_del_ack() - deinitialize tcp delayed ack structure
+ * @vdev: vdev handle
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
+{
+	struct tcp_stream_node *temp;
+
+	qdf_destroy_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq);
+
+	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
+	while (vdev->tcp_ack_hash.tcp_free_list) {
+		temp = vdev->tcp_ack_hash.tcp_free_list;
+		vdev->tcp_ack_hash.tcp_free_list = temp->next;
+		qdf_mem_free(temp);
+	}
+	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
+}
+
+/**
+ * ol_txrx_vdev_free_tcp_node() - add tcp node in free list
+ * @vdev: vdev handle
+ * @node: tcp stream node
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_free_tcp_node(struct ol_txrx_vdev_t *vdev,
+				struct tcp_stream_node *node)
+{
+	qdf_atomic_dec(&vdev->tcp_ack_hash.tcp_node_in_use_count);
+
+	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
+	if (vdev->tcp_ack_hash.tcp_free_list) {
+		node->next = vdev->tcp_ack_hash.tcp_free_list;
+		vdev->tcp_ack_hash.tcp_free_list = node;
+	} else {
+		vdev->tcp_ack_hash.tcp_free_list = node;
+		node->next = NULL;
+	}
+	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
+}
+
+/**
+ * ol_txrx_vdev_alloc_tcp_node() - allocate tcp node
+ * @vdev: vdev handle
+ *
+ * Return: tcp stream node
+ */
+struct tcp_stream_node *ol_txrx_vdev_alloc_tcp_node(struct ol_txrx_vdev_t *vdev)
+{
+	struct tcp_stream_node *node = NULL;
+
+	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
+	if (vdev->tcp_ack_hash.tcp_free_list) {
+		node = vdev->tcp_ack_hash.tcp_free_list;
+		vdev->tcp_ack_hash.tcp_free_list = node->next;
+	}
+	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
+
+	if (!node) {
+		node = qdf_mem_malloc(sizeof(struct tcp_stream_node));
+		if (!node)
+			return NULL;
+	}
+	qdf_atomic_inc(&vdev->tcp_ack_hash.tcp_node_in_use_count);
+	return node;
+}
+
+qdf_nbuf_t
+ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	int tx_comp_req = pdev->cfg.default_tx_comp_req;
+	struct packet_info pkt_info;
+	qdf_nbuf_t temp;
+
+	if (ol_tx_is_tcp_ack(msdu_list))
+		vdev->no_of_tcpack++;
+
+	/* check Enable through ini */
+	if (!ol_cfg_get_del_ack_enable_value(vdev->pdev->ctrl_pdev) ||
+	    (!vdev->driver_del_ack_enabled)) {
+		if (qdf_atomic_read(&vdev->tcp_ack_hash.tcp_node_in_use_count))
+			ol_tx_hl_send_all_tcp_ack(vdev);
+
+		return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
+				    tx_comp_req, true);
+	}
+
+	ol_tx_get_packet_info(msdu_list, &pkt_info);
+
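+	/*
+	 * TCP frame that is not a pure ack: first send out any ack already
+	 * held for this stream, then transmit the frame normally.
+	 */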
+	if (pkt_info.type == TCP_PKT_NO_ACK) {
+		ol_tx_hl_find_and_send_tcp_stream(vdev, &pkt_info);
+		temp = ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
+				     tx_comp_req, true);
+		return temp;
+	}
+
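+	/*
+	 * Pure TCP ack: hold it in the per-stream hash, replacing any older
+	 * held ack. It is sent once gDriverDelAckPktCount acks have been
+	 * replaced or the gDriverDelAckTimerValue timer expires.
+	 */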
+	if (pkt_info.type == TCP_PKT_ACK) {
+		ol_tx_hl_find_and_replace_tcp_ack(vdev, msdu_list, &pkt_info);
+		return NULL;
+	}
+
+	temp = ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
+			     tx_comp_req, true);
+	return temp;
+}
+#else
+
 qdf_nbuf_t
 ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 {
@@ -708,8 +1435,10 @@
 	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
 				pdev->cfg.request_tx_comp;
 
-	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
+	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
+			     msdu_list, tx_comp_req, true);
 }
+#endif
 
 qdf_nbuf_t ol_tx_non_std_hl(struct ol_txrx_vdev_t *vdev,
 			    enum ol_tx_spec tx_spec,
@@ -724,7 +1453,7 @@
 		    (pdev->tx_data_callback.func))
 			tx_comp_req = 1;
 	}
-	return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req);
+	return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req, true);
 }
 
 #ifdef FEATURE_WLAN_TDLS
diff --git a/core/dp/txrx/ol_txrx.c b/core/dp/txrx/ol_txrx.c
index e93bcd6..916205b 100644
--- a/core/dp/txrx/ol_txrx.c
+++ b/core/dp/txrx/ol_txrx.c
@@ -1723,6 +1723,9 @@
 	vdev->ll_pause.max_q_depth =
 		ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
 	qdf_status = qdf_event_create(&vdev->wait_delete_comp);
+
+	ol_txrx_vdev_init_tcp_del_ack(vdev);
+
 	/* add this vdev into the pdev's list */
 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
 	if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam())
@@ -5636,7 +5639,11 @@
 	.pkt_log_init = htt_pkt_log_init,
 	.pkt_log_con_service = ol_txrx_pkt_log_con_service,
 	.register_pktdump_cb = ol_register_packetdump_callback,
-	.unregister_pktdump_cb = ol_deregister_packetdump_callback
+	.unregister_pktdump_cb = ol_deregister_packetdump_callback,
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+	.pdev_reset_driver_del_ack = ol_tx_pdev_reset_driver_del_ack,
+	.vdev_set_driver_del_ack_enable = ol_tx_vdev_set_driver_del_ack_enable
+#endif
 };
 
 static struct cdp_flowctl_ops ol_ops_flowctl = {
diff --git a/core/dp/txrx/ol_txrx.h b/core/dp/txrx/ol_txrx.h
index b586bb4..f8ae203 100644
--- a/core/dp/txrx/ol_txrx.h
+++ b/core/dp/txrx/ol_txrx.h
@@ -25,6 +25,7 @@
 #include <cdp_txrx_handle.h>
 #include <ol_txrx_types.h>
 #include <ol_txrx_internal.h>
+#include <qdf_hrtimer.h>
 
 /*
  * Pool of tx descriptors reserved for
@@ -438,4 +439,170 @@
  * return NONE
  */
 void ol_txrx_set_tx_compl_tsf64(bool val);
+
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+
+/**
+ * ol_txrx_vdev_init_tcp_del_ack() - initialize tcp delayed ack structure
+ * @vdev: vdev handle
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_init_tcp_del_ack(struct ol_txrx_vdev_t *vdev);
+
+/**
+ * ol_txrx_vdev_deinit_tcp_del_ack() - deinitialize tcp delayed ack structure
+ * @vdev: vdev handle
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev);
+
+/**
+ * ol_txrx_vdev_free_tcp_node() - add tcp node in free list
+ * @vdev: vdev handle
+ * @node: tcp stream node
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_free_tcp_node(struct ol_txrx_vdev_t *vdev,
+				struct tcp_stream_node *node);
+
+/**
+ * ol_txrx_vdev_alloc_tcp_node() - allocate tcp node
+ * @vdev: vdev handle
+ *
+ * Return: tcp stream node
+ */
+struct tcp_stream_node *
+ol_txrx_vdev_alloc_tcp_node(struct ol_txrx_vdev_t *vdev);
+
+/**
+ * ol_tx_pdev_reset_driver_del_ack() - reset driver delayed ack enabled flag
+ * @ppdev: the data physical device
+ *
+ * Return: none
+ */
+void
+ol_tx_pdev_reset_driver_del_ack(struct cdp_pdev *ppdev);
+
+/**
+ * ol_tx_vdev_set_driver_del_ack_enable() - set driver delayed ack enabled flag
+ * @vdev_id: vdev id
+ * @rx_packets: number of rx packets
+ * @time_in_ms: time in ms
+ * @high_th: high threshold
+ * @low_th: low threshold
+ *
+ * Return: none
+ */
+void
+ol_tx_vdev_set_driver_del_ack_enable(uint8_t vdev_id,
+				     unsigned long rx_packets,
+				     uint32_t time_in_ms,
+				     uint32_t high_th,
+				     uint32_t low_th);
+
+/**
+ * ol_tx_hl_send_all_tcp_ack() - send all queued tcp ack packets
+ * @vdev: vdev handle
+ *
+ * Return: none
+ */
+void ol_tx_hl_send_all_tcp_ack(struct ol_txrx_vdev_t *vdev);
+
+/**
+ * tcp_del_ack_tasklet() - tasklet function to send ack packets
+ * @data: vdev handle
+ *
+ * Return: none
+ */
+void tcp_del_ack_tasklet(void *data);
+
+/**
+ * ol_tx_get_stream_id() - get stream_id from packet info
+ * @info: packet info
+ *
+ * Return: stream_id
+ */
+uint16_t ol_tx_get_stream_id(struct packet_info *info);
+
+/**
+ * ol_tx_get_packet_info() - update packet info for passed msdu
+ * @msdu: packet
+ * @info: packet info
+ *
+ * Return: none
+ */
+void ol_tx_get_packet_info(qdf_nbuf_t msdu, struct packet_info *info);
+
+/**
+ * ol_tx_hl_find_and_send_tcp_stream() - find and send tcp stream for passed
+ *                                       stream info
+ * @vdev: vdev handle
+ * @info: packet info
+ *
+ * Return: none
+ */
+void ol_tx_hl_find_and_send_tcp_stream(struct ol_txrx_vdev_t *vdev,
+				       struct packet_info *info);
+
+/**
+ * ol_tx_hl_find_and_replace_tcp_ack() - find and replace tcp ack packet for
+ *                                       passed packet info
+ * @vdev: vdev handle
+ * @msdu: packet
+ * @info: packet info
+ *
+ * Return: none
+ */
+void ol_tx_hl_find_and_replace_tcp_ack(struct ol_txrx_vdev_t *vdev,
+				       qdf_nbuf_t msdu,
+				       struct packet_info *info);
+
+/**
+ * ol_tx_hl_vdev_tcp_del_ack_timer() - delayed ack timer function
+ * @timer: timer handle
+ *
+ * Return: enum qdf_hrtimer_restart_status
+ */
+enum qdf_hrtimer_restart_status
+ol_tx_hl_vdev_tcp_del_ack_timer(qdf_hrtimer_data_t *timer);
+
+/**
+ * ol_tx_hl_del_ack_queue_flush_all() - drop all queued packets
+ * @vdev: vdev handle
+ *
+ * Return: none
+ */
+void ol_tx_hl_del_ack_queue_flush_all(struct ol_txrx_vdev_t *vdev);
+
+#else
+
+static inline
+void ol_txrx_vdev_init_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
+{
+}
+
+static inline
+void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
+{
+}
+
+static inline
+void ol_tx_pdev_reset_driver_del_ack(struct cdp_pdev *ppdev)
+{
+}
+
+static inline
+void ol_tx_vdev_set_driver_del_ack_enable(uint8_t vdev_id,
+					  unsigned long rx_packets,
+					  uint32_t time_in_ms,
+					  uint32_t high_th,
+					  uint32_t low_th)
+{
+}
+
+#endif
+
 #endif /* _OL_TXRX__H_ */
diff --git a/core/dp/txrx/ol_txrx_types.h b/core/dp/txrx/ol_txrx_types.h
index fdb4c27..3aa0311 100644
--- a/core/dp/txrx/ol_txrx_types.h
+++ b/core/dp/txrx/ol_txrx_types.h
@@ -42,6 +42,7 @@
 #include "cdp_txrx_flow_ctrl_v2.h"
 #include "cdp_txrx_peer_ops.h"
 #include <qdf_trace.h>
+#include "qdf_hrtimer.h"
 
 /*
  * The target may allocate multiple IDs for a peer.
@@ -1075,6 +1076,76 @@
 	bool enable_tx_compl_tsf64;
 };
 
+#define OL_TX_HL_DEL_ACK_HASH_SIZE    256
+
+/**
+ * enum ol_tx_hl_packet_type - type for tcp packet
+ * @TCP_PKT_ACK: TCP ACK frame
+ * @TCP_PKT_NO_ACK: TCP frame, but not the ack
+ * @NO_TCP_PKT: Not the TCP frame
+ */
+enum ol_tx_hl_packet_type {
+	TCP_PKT_ACK,
+	TCP_PKT_NO_ACK,
+	NO_TCP_PKT
+};
+
+/**
+ * struct packet_info - tcp packet information
+ */
+struct packet_info {
+	/** @type: flag the packet type */
+	enum ol_tx_hl_packet_type type;
+	/** @stream_id: stream identifier */
+	uint16_t stream_id;
+	/** @ack_number: tcp ack number */
+	uint32_t ack_number;
+	/** @dst_ip: destination ip address */
+	uint32_t dst_ip;
+	/** @src_ip: source ip address */
+	uint32_t src_ip;
+	/** @dst_port: destination port */
+	uint16_t dst_port;
+	/** @src_port: source port */
+	uint16_t src_port;
+};
+
+/**
+ * struct tcp_stream_node - tcp stream node
+ */
+struct tcp_stream_node {
+	/** @next: next tcp stream node */
+	struct tcp_stream_node *next;
+	/** @no_of_ack_replaced: count for ack replaced frames */
+	uint8_t no_of_ack_replaced;
+	/** @stream_id: stream identifier */
+	uint16_t stream_id;
+	/** @dst_ip: destination ip address */
+	uint32_t dst_ip;
+	/** @src_ip: source ip address */
+	uint32_t src_ip;
+	/** @dst_port: destination port */
+	uint16_t dst_port;
+	/** @src_port: source port */
+	uint16_t src_port;
+	/** @ack_number: tcp ack number */
+	uint32_t ack_number;
+	/** @head: point to the tcp ack frame */
+	qdf_nbuf_t head;
+};
+
+/**
+ * struct tcp_del_ack_hash_node - hash node for tcp delayed ack
+ */
+struct tcp_del_ack_hash_node {
+	/** @hash_node_lock: spin lock */
+	qdf_spinlock_t hash_node_lock;
+	/** @no_of_entries: number of entries */
+	uint8_t no_of_entries;
+	/** @head: the head of the stream node list */
+	struct tcp_stream_node *head;
+};
+
 struct ol_txrx_vdev_t {
 	struct ol_txrx_pdev_t *pdev; /* pdev - the physical device that is
 				      * the parent of this virtual device
@@ -1169,6 +1240,33 @@
 	ol_txrx_tx_flow_control_is_pause_fp osif_flow_control_is_pause;
 	void *osif_fc_ctx;
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+	/** @driver_del_ack_enabled: true if tcp delayed ack enabled */
+	bool driver_del_ack_enabled;
+	/** @no_of_tcpack_replaced: number of tcp ack replaced */
+	uint32_t no_of_tcpack_replaced;
+	/** @no_of_tcpack: number of tcp ack frames */
+	uint32_t no_of_tcpack;
+
+	/** @tcp_ack_hash: hash table for tcp delayed ack running information */
+	struct {
+		/** @node: tcp ack frame will be stored in this hash table */
+		struct tcp_del_ack_hash_node node[OL_TX_HL_DEL_ACK_HASH_SIZE];
+		/** @timer: hrtimer to flush held tcp acks on timeout */
+		__qdf_hrtimer_data_t timer;
+		/** @is_timer_running: is timer running? */
+		qdf_atomic_t is_timer_running;
+		/** @tcp_node_in_use_count: number of nodes in use */
+		qdf_atomic_t tcp_node_in_use_count;
+		/** @tcp_del_ack_tq: bh to handle the tcp delayed ack */
+		qdf_bh_t tcp_del_ack_tq;
+		/** @tcp_free_list: free list */
+		struct tcp_stream_node *tcp_free_list;
+		/** @tcp_free_list_lock: spin lock */
+		qdf_spinlock_t tcp_free_list_lock;
+	} tcp_ack_hash;
+#endif
+
 #if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
 	union ol_txrx_align_mac_addr_t hl_tdls_ap_mac_addr;
 	bool hlTdlsFlag;
diff --git a/core/hdd/inc/hdd_dp_cfg.h b/core/hdd/inc/hdd_dp_cfg.h
index 7592d9d..01ccd89 100644
--- a/core/hdd/inc/hdd_dp_cfg.h
+++ b/core/hdd/inc/hdd_dp_cfg.h
@@ -619,6 +619,135 @@
 		"High Threshold inorder to trigger High Tx Tp")
 #endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+/*
+ * <ini>
+ * gDriverDelAckHighThreshold - High threshold in order to enable the TCP
+ *                              delayed ack feature in the host.
+ * @Min: 0
+ * @Max: 70000
+ * @Default: 300
+ *
+ * This ini specifies the number of RX packets received over a period of
+ * 100 ms beyond which TCP delayed ack is enabled in the host to improve
+ * TCP RX throughput.
+ *
+ * Supported Feature: Tcp Delayed Ack in the host
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_DRIVER_TCP_DELACK_HIGH_THRESHOLD \
+		CFG_INI_UINT( \
+		"gDriverDelAckHighThreshold", \
+		0, \
+		70000, \
+		300, \
+		CFG_VALUE_OR_DEFAULT, \
+		"TCP delack high threshold")
+
+/*
+ * <ini>
+ * gDriverDelAckLowThreshold - Low threshold in order to disable the TCP
+ *                             delayed ack feature in the host.
+ * @Min: 0
+ * @Max: 70000
+ * @Default: 100
+ *
+ * This ini specifies the number of RX packets received over a period of
+ * 100 ms below which the TCP delayed ack feature is disabled in the host.
+ *
+ * Supported Feature: Tcp Delayed Ack in the host
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_DRIVER_TCP_DELACK_LOW_THRESHOLD \
+		CFG_INI_UINT( \
+		"gDriverDelAckLowThreshold", \
+		0, \
+		70000, \
+		100, \
+		CFG_VALUE_OR_DEFAULT, \
+		"TCP delack low threshold")
+
+/*
+ * <ini>
+ * gDriverDelAckTimerValue - Timeout value (ms) to send out all pending
+ *                           TCP delayed ack frames
+ * @Min: 1
+ * @Max: 15
+ * @Default: 3
+ *
+ * This ini specifies the timeout value after which all pending TCP
+ * delayed ack frames are sent out.
+ *
+ * Supported Feature: Tcp Delayed Ack in the host
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_DRIVER_TCP_DELACK_TIMER_VALUE \
+		CFG_INI_UINT( \
+		"gDriverDelAckTimerValue", \
+		1, \
+		15, \
+		3, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Send out all TCP Del Acks if time out")
+
+/*
+ * <ini>
+ * gDriverDelAckPktCount - The maximum number of TCP delayed ack frames
+ * @Min: 0
+ * @Max: 50
+ * @Default: 20
+ *
+ * This ini specifies the maximum number of TCP acks that can be replaced
+ * (held) in the driver before the pending ack is sent out.
+ *
+ * Supported Feature: Tcp Delayed Ack in the host
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_DRIVER_TCP_DELACK_PKT_CNT \
+		CFG_INI_UINT( \
+		"gDriverDelAckPktCount", \
+		0, \
+		50, \
+		20, \
+		CFG_VALUE_OR_DEFAULT, \
+		"No of TCP Del ACK count")
+
+/*
+ * <ini>
+ * gDriverDelAckEnable - Control to enable dynamic configuration of TCP
+ *                       delayed ack in the host.
+ * @Default: true
+ *
+ * This ini is used to enable dynamic configuration of TCP delayed ack in
+ * the host; the feature is turned on/off at runtime based on the RX
+ * packet thresholds.
+ *
+ * Related: gDriverDelAckHighThreshold, gDriverDelAckLowThreshold,
+ *          gDriverDelAckPktCount, gDriverDelAckTimerValue
+ *
+ * Supported Feature: Tcp Delayed Ack in the host
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_DRIVER_TCP_DELACK_ENABLE \
+		CFG_INI_BOOL( \
+		"gDriverDelAckEnable", \
+		true, \
+		"Enable tcp del ack in the driver")
+#endif
+
 /*
  * <ini>
  * NAPI_CPU_AFFINITY_MASK - CPU mask to affine NAPIs
@@ -1064,6 +1193,17 @@
 #define CFG_HDD_DP_BUS_BANDWIDTH
 #endif
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+#define CFG_DP_DRIVER_TCP_DELACK \
+	CFG(CFG_DP_DRIVER_TCP_DELACK_HIGH_THRESHOLD) \
+	CFG(CFG_DP_DRIVER_TCP_DELACK_LOW_THRESHOLD) \
+	CFG(CFG_DP_DRIVER_TCP_DELACK_TIMER_VALUE) \
+	CFG(CFG_DP_DRIVER_TCP_DELACK_PKT_CNT) \
+	CFG(CFG_DP_DRIVER_TCP_DELACK_ENABLE)
+#else
+#define CFG_DP_DRIVER_TCP_DELACK
+#endif
+
 #define CFG_HDD_DP_ALL \
 	CFG(CFG_DP_NAPI_CE_CPU_MASK) \
 	CFG(CFG_DP_RX_THREAD_CPU_MASK) \
@@ -1083,6 +1223,7 @@
 	CFG(CFG_DP_HTC_WMI_CREDIT_CNT) \
 	CFG_DP_ENABLE_FASTPATH_ALL \
 	CFG_HDD_DP_BUS_BANDWIDTH \
+	CFG_DP_DRIVER_TCP_DELACK \
 	CFG_HDD_DP_LEGACY_TX_FLOW \
 	CFG_DP_ENABLE_NUD_TRACKING_ALL \
 	CFG_DP_CONFIG_DP_TRACE_ALL
diff --git a/core/hdd/inc/wlan_hdd_cfg.h b/core/hdd/inc/wlan_hdd_cfg.h
index f2c1f3c..53928c7 100644
--- a/core/hdd/inc/wlan_hdd_cfg.h
+++ b/core/hdd/inc/wlan_hdd_cfg.h
@@ -173,6 +173,14 @@
 	bool     enable_tcp_param_update;
 #endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+	bool del_ack_enable;
+	uint32_t del_ack_threshold_high;
+	uint32_t del_ack_threshold_low;
+	uint16_t del_ack_timer_value;
+	uint16_t del_ack_pkt_count;
+#endif
+
 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
 	uint32_t tx_flow_low_watermark;
 	uint32_t tx_flow_hi_watermark_offset;
diff --git a/core/hdd/src/wlan_hdd_main.c b/core/hdd/src/wlan_hdd_main.c
index db05c87..456c6f7 100644
--- a/core/hdd/src/wlan_hdd_main.c
+++ b/core/hdd/src/wlan_hdd_main.c
@@ -170,6 +170,7 @@
 #include <target_type.h>
 #include <wlan_hdd_debugfs_coex.h>
 #include "wlan_blm_ucfg_api.h"
+#include "ol_txrx.h"
 
 #ifdef MODULE
 #define WLAN_MODULE_NAME  module_name(THIS_MODULE)
@@ -7980,6 +7981,37 @@
 	hdd_display_periodic_stats(hdd_ctx, (total_pkts > 0) ? true : false);
 }
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+/**
+ * hdd_set_driver_del_ack_enable() - set driver delayed ack enabled flag
+ * @vdev_id: vdev id
+ * @hdd_ctx: handle to hdd context
+ * @rx_packets: receive packet count
+ *
+ * Return: none
+ */
+static inline
+void hdd_set_driver_del_ack_enable(uint16_t vdev_id,
+				   struct hdd_context *hdd_ctx,
+				   uint64_t rx_packets)
+{
+	struct hdd_config *cfg = hdd_ctx->config;
+
+	cdp_vdev_set_driver_del_ack_enable(cds_get_context(QDF_MODULE_ID_SOC),
+					   vdev_id, rx_packets,
+					   cfg->bus_bw_compute_interval,
+					   cfg->del_ack_threshold_high,
+					   cfg->del_ack_threshold_low);
+}
+#else
+static inline
+void hdd_set_driver_del_ack_enable(uint16_t vdev_id,
+				   struct hdd_context *hdd_ctx,
+				   uint64_t rx_packets)
+{
+}
+#endif
+
 #define HDD_BW_GET_DIFF(_x, _y) (unsigned long)((ULONG_MAX - (_y)) + (_x) + 1)
 static void __hdd_bus_bw_work_handler(struct hdd_context *hdd_ctx)
 {
@@ -8047,6 +8079,9 @@
 		if (adapter->device_mode == QDF_SAP_MODE)
 			con_sap_adapter = adapter;
 
+		hdd_set_driver_del_ack_enable(adapter->vdev_id, hdd_ctx,
+					      rx_packets);
+
 		total_rx += adapter->stats.rx_packets;
 		total_tx += adapter->stats.tx_packets;
 
@@ -12692,6 +12727,8 @@
 
 	ucfg_ipa_set_perf_level(hdd_ctx->pdev, 0, 0);
 	hdd_reset_tcp_delack(hdd_ctx);
+	cdp_pdev_reset_driver_del_ack(cds_get_context(QDF_MODULE_ID_SOC),
+				      cds_get_context(QDF_MODULE_ID_TXRX));
 }
 
 void hdd_bus_bw_compute_timer_stop(struct hdd_context *hdd_ctx)
diff --git a/core/hdd/src/wlan_hdd_tx_rx.c b/core/hdd/src/wlan_hdd_tx_rx.c
index 15eb126..407a72b 100644
--- a/core/hdd/src/wlan_hdd_tx_rx.c
+++ b/core/hdd/src/wlan_hdd_tx_rx.c
@@ -3045,6 +3045,28 @@
 }
 #endif
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
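+/**
+ * hdd_ini_tcp_del_ack_settings() - populate driver TCP delayed ack config
+ * @config: pointer to hdd config
+ * @psoc: pointer to psoc object
+ *
+ * Return: none
+ */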
+static void hdd_ini_tcp_del_ack_settings(struct hdd_config *config,
+					 struct wlan_objmgr_psoc *psoc)
+{
+	config->del_ack_threshold_high =
+		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_HIGH_THRESHOLD);
+	config->del_ack_threshold_low =
+		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_LOW_THRESHOLD);
+	config->del_ack_enable =
+		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_ENABLE);
+	config->del_ack_pkt_count =
+		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_PKT_CNT);
+	config->del_ack_timer_value =
+		cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_TIMER_VALUE);
+}
+#else
+static void hdd_ini_tcp_del_ack_settings(struct hdd_config *config,
+					 struct wlan_objmgr_psoc *psoc)
+{
+}
+#endif
+
 void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
 		       struct hdd_context *hdd_ctx)
 {
@@ -3055,6 +3077,9 @@
 	hdd_ini_tx_flow_control(config, psoc);
 	hdd_ini_bus_bandwidth(config, psoc);
 	hdd_ini_tcp_settings(config, psoc);
+
+	hdd_ini_tcp_del_ack_settings(config, psoc);
+
 	config->napi_cpu_affinity_mask =
 		cfg_get(psoc, CFG_DP_NAPI_CE_CPU_MASK);
 	config->rx_thread_affinity_mask =