qcacld-3.0: Send peer unmap confirmation cmd to FW

Send WMI_PEER_UNMAP_CONF_CMDID to FW after receiving all
UNMAP events for a peer. This keeps MAP and UNMAP handling
in sync, so that the same peer_id is not reused for a
different MAC address until all of its UNMAPs have been
processed.
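
As an illustration of the scheme (a standalone sketch; every
struct and function name below is invented for the example and
is not a driver API): each UNMAP event parks the freed peer_id
on the peer, and one confirmation listing all parked ids goes
out when the peer is finally freed.

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_IDS_PER_PEER 2
    #define INVALID_PEER_ID  0xffff

    /* invented stand-in for a txrx peer object */
    struct demo_peer {
            uint16_t unmapped_ids[MAX_IDS_PER_PEER];
    };

    /* invented stand-in for the WMI_PEER_UNMAP_CONF_CMDID send */
    static void fw_send_unmap_conf(int cnt, const uint16_t *ids)
    {
            printf("unmap conf, %d id(s):", cnt);
            for (int i = 0; i < cnt; i++)
                    printf(" %d", ids[i]);
            printf("\n");
    }

    /* UNMAP handler: park the peer_id instead of confirming at once */
    static void on_unmap(struct demo_peer *p, uint16_t peer_id)
    {
            for (int i = 0; i < MAX_IDS_PER_PEER; i++) {
                    if (p->unmapped_ids[i] == INVALID_PEER_ID) {
                            p->unmapped_ids[i] = peer_id;
                            return;
                    }
            }
    }

    /* peer free: confirm every parked id in a single command */
    static void on_peer_free(struct demo_peer *p)
    {
            uint16_t ids[MAX_IDS_PER_PEER];
            int cnt = 0;

            for (int i = 0; i < MAX_IDS_PER_PEER; i++) {
                    if (p->unmapped_ids[i] == INVALID_PEER_ID)
                            continue;
                    ids[cnt++] = p->unmapped_ids[i];
                    p->unmapped_ids[i] = INVALID_PEER_ID;
            }
            if (cnt)
                    fw_send_unmap_conf(cnt, ids);
    }

    int main(void)
    {
            struct demo_peer p = {
                    .unmapped_ids = { INVALID_PEER_ID, INVALID_PEER_ID },
            };

            on_unmap(&p, 7);        /* first UNMAP for this peer */
            on_unmap(&p, 8);        /* second peer_id, same peer */
            on_peer_free(&p);       /* -> one conf covering both */
            return 0;
    }

Until on_peer_free() runs, FW must keep treating both ids as owned
by this peer's MAC address and must not hand them to a new peer.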

Change-Id: I2c7793b527d5dbab0f9571185e6c16e0c6b7fcc3
CRs-Fixed: 2357245
diff --git a/core/dp/txrx/ol_txrx.c b/core/dp/txrx/ol_txrx.c
index 1fd2cab..7c22def 100644
--- a/core/dp/txrx/ol_txrx.c
+++ b/core/dp/txrx/ol_txrx.c
@@ -2063,6 +2063,7 @@
 	struct ol_txrx_pdev_t *pdev;
 	bool cmp_wait_mac = false;
 	uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
+	uint8_t check_valid = 0;
 
 	/* preconditions */
 	TXRX_ASSERT2(vdev);
@@ -2071,6 +2072,9 @@
 	pdev = vdev->pdev;
 	TXRX_ASSERT2(pdev);
 
+	if (pdev->enable_peer_unmap_conf_support)
+		check_valid = 1;
+
 	if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
 				QDF_MAC_ADDR_SIZE))
 		cmp_wait_mac = true;
@@ -2079,7 +2083,8 @@
 	/* check for duplicate existing peer */
 	TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
 		if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
-			(union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
+			(union ol_txrx_align_mac_addr_t *)peer_mac_addr) &&
+			(check_valid == 0 || temp_peer->valid)) {
 			ol_txrx_info_high(
 				"vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
 				vdev->vdev_id,
@@ -2098,7 +2103,9 @@
 		}
 		if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
 					&temp_peer->mac_addr,
-					&vdev->last_peer_mac_addr)) {
+					&vdev->last_peer_mac_addr) &&
+					(check_valid == 0 ||
+					 temp_peer->valid)) {
 			ol_txrx_info_high(
 				"vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) old peer exists.\n",
 				vdev->vdev_id,
@@ -2172,6 +2179,10 @@
 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
 		peer->peer_ids[i] = HTT_INVALID_PEER;
 
+	/* ids parked here are confirmed to FW when the peer is freed */
+	if (pdev->enable_peer_unmap_conf_support)
+		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
+			peer->map_unmap_peer_ids[i] = HTT_INVALID_PEER;
+
 	qdf_spinlock_create(&peer->peer_info_lock);
 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
 
@@ -2810,6 +2821,45 @@
 }
 
 /**
+ * ol_txrx_send_peer_unmap_conf() - send peer unmap conf cmd to FW
+ * @pdev: pdev_handle
+ * @peer: peer_handle
+ *
+ * Return: None
+ */
+static inline void
+ol_txrx_send_peer_unmap_conf(ol_txrx_pdev_handle pdev,
+			     ol_txrx_peer_handle peer)
+{
+	int i;
+	int peer_cnt = 0;
+	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
+
+	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
+
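+	/* collect the peer_ids whose UNMAPs arrived, clearing each slot */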
+	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER &&
+	     peer_cnt < MAX_NUM_PEER_ID_PER_PEER; i++) {
+		if (peer->map_unmap_peer_ids[i] == HTT_INVALID_PEER)
+			continue;
+		peer_ids[peer_cnt++] = peer->map_unmap_peer_ids[i];
+		peer->map_unmap_peer_ids[i] = HTT_INVALID_PEER;
+	}
+
+	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
+
+	if (peer->peer_unmap_sync_cb && peer_cnt) {
+		ol_txrx_dbg("send unmap conf cmd [%d]", peer_cnt);
+		status = peer->peer_unmap_sync_cb(
+				DEBUG_INVALID_VDEV_ID,
+				peer_cnt, peer_ids);
+		if (status != QDF_STATUS_SUCCESS)
+			ol_txrx_err("unable to send unmap conf cmd [%d]",
+				    peer_cnt);
+	}
+}
+
+/**
  * ol_txrx_peer_free_tids() - free tids for the peer
  * @peer: peer handle
  *
@@ -3025,6 +3075,10 @@
 
 		ol_txrx_peer_tx_queue_free(pdev, peer);
 
+		/* send peer unmap conf cmd to fw for unmapped peer_ids */
+		if (pdev->enable_peer_unmap_conf_support)
+			ol_txrx_send_peer_unmap_conf(pdev, peer);
+
 		/* Remove mappings from peer_id to peer object */
 		ol_txrx_peer_clear_map_peer(pdev, peer);
 
@@ -3253,6 +3307,28 @@
 }
 
 /**
+ * ol_txrx_peer_detach_sync() - peer detach sync callback
+ * @ppeer: the peer object
+ * @peer_unmap_sync: peer unmap sync callback
+ * @bitmap: bitmap indicating special handling of the request
+ *
+ * Return: None
+ */
+static void ol_txrx_peer_detach_sync(void *ppeer,
+				     ol_txrx_peer_unmap_sync_cb peer_unmap_sync,
+				     uint32_t bitmap)
+{
+	ol_txrx_peer_handle peer = ppeer;
+
+	ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d", __func__,
+			  peer, qdf_atomic_read(&peer->ref_cnt));
+
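+	/* save the cb; the peer free path uses it to send the conf cmd */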
+	peer->peer_unmap_sync_cb = peer_unmap_sync;
+	ol_txrx_peer_detach(peer, bitmap);
+}
+
+/**
  * ol_txrx_dump_tx_desc() - dump tx desc total and free count
  * @txrx_pdev: Pointer to txrx pdev
  *
@@ -5393,6 +5469,7 @@
 	.txrx_peer_setup = NULL,
 	.txrx_peer_teardown = NULL,
 	.txrx_peer_delete = ol_txrx_peer_detach,
+	.txrx_peer_delete_sync = ol_txrx_peer_detach_sync,
 	.txrx_vdev_register = ol_txrx_vdev_register,
 	.txrx_soc_detach = ol_txrx_soc_detach,
 	.txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
@@ -5542,6 +5619,8 @@
 	.set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
 	.set_new_htt_msg_format =
 		ol_txrx_set_new_htt_msg_format,
+	.set_peer_unmap_conf_support = ol_txrx_set_peer_unmap_conf_support,
+	.get_peer_unmap_conf_support = ol_txrx_get_peer_unmap_conf_support,
 };
 
 static struct cdp_peer_ops ol_ops_peer = {
@@ -5681,3 +5760,24 @@
 	pdev->new_htt_msg_format = val;
 }
 
+bool ol_txrx_get_peer_unmap_conf_support(void)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
+	if (!pdev) {
+		ol_txrx_err("pdev is NULL");
+		return false;
+	}
+	return pdev->enable_peer_unmap_conf_support;
+}
+
+void ol_txrx_set_peer_unmap_conf_support(bool val)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
+	if (!pdev) {
+		ol_txrx_err("pdev is NULL");
+		return;
+	}
+	pdev->enable_peer_unmap_conf_support = val;
+}
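
Usage illustration (not part of this change): a compilable sketch
of how a caller might wire the new txrx_peer_delete_sync op,
passing the WMI send routine down as the sync callback. Every
name below except the op concept itself is invented.

    #include <stdio.h>
    #include <stdint.h>

    typedef int QDF_STATUS;             /* simplified stand-in */
    #define QDF_STATUS_SUCCESS 0

    /* invented mirror of the peer unmap sync callback signature */
    typedef QDF_STATUS (*unmap_sync_cb)(uint8_t vdev_id,
                                        uint32_t peer_id_cnt,
                                        uint16_t *peer_id_list);

    /* invented WMI-layer sender: would pack the id list into a
     * WMI_PEER_UNMAP_CONF_CMDID message for the firmware */
    static QDF_STATUS wma_send_peer_unmap_conf(uint8_t vdev_id,
                                               uint32_t cnt,
                                               uint16_t *ids)
    {
            printf("vdev %u: unmap conf for %u id(s)\n", vdev_id, cnt);
            return QDF_STATUS_SUCCESS;
    }

    /* stub standing in for ol_txrx_peer_detach_sync(): stores the
     * cb, then pretends the last UNMAP arrived and confirms */
    static void peer_delete_sync(void *ppeer, unmap_sync_cb cb,
                                 uint32_t bitmap)
    {
            uint16_t ids[] = { 7 };

            (void)ppeer;
            (void)bitmap;
            cb(0xff, 1, ids);   /* 0xff ~ "invalid vdev" debug id */
    }

    int main(void)
    {
            /* deletion path hands the sender down as the sync cb */
            peer_delete_sync(NULL, wma_send_peer_unmap_conf, 0);
            return 0;
    }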