qcacmn: cdp: Convergence of cdp_l_flowctl_ops
Currently the cdp APIs are given pdev/vdev/peer
handles as their arguments, which are directly
accessed in those APIs. This can cause a
race condition in the access of the respective
handles if they have been deleted in parallel.
Hence, as a part of cdp convergence, pass only
the pdev/vdev id or peer mac address, which will be
used to look up the respective handles, thereby
avoiding unwanted access to the handles if
they have been deleted.
Converged l_flowctl_ops
- register_tx_flow_control
- set_vdev_tx_desc_limit
- set_vdev_os_queue_status
- deregister_tx_flow_control_cb
- flow_control_cb
- get_tx_resource
- ll_set_tx_pause_q_depth
- vdev_flush
- vdev_pause
- vdev_unpause
CRs-Fixed: 2539825
Change-Id: Ic526dea5bb14471cdf67bc72bec061a26d623f79
diff --git a/dp/inc/cdp_txrx_flow_ctrl_legacy.h b/dp/inc/cdp_txrx_flow_ctrl_legacy.h
index d919d50..3feefc8 100644
--- a/dp/inc/cdp_txrx_flow_ctrl_legacy.h
+++ b/dp/inc/cdp_txrx_flow_ctrl_legacy.h
@@ -59,7 +59,8 @@
if (!soc->ops->l_flowctl_ops->set_vdev_tx_desc_limit)
return 0;
- return soc->ops->l_flowctl_ops->set_vdev_tx_desc_limit(vdev_id, chan);
+ return soc->ops->l_flowctl_ops->set_vdev_tx_desc_limit(soc, vdev_id,
+ chan);
}
static inline int cdp_hl_fc_set_os_queue_status(ol_txrx_soc_handle soc,
@@ -69,7 +70,8 @@
if (!soc->ops->l_flowctl_ops->set_vdev_os_queue_status)
return -EINVAL;
- return soc->ops->l_flowctl_ops->set_vdev_os_queue_status(vdev_id,
+ return soc->ops->l_flowctl_ops->set_vdev_os_queue_status(soc,
+ vdev_id,
action);
}
#else
@@ -109,7 +111,7 @@
*/
static inline int
cdp_fc_register(ol_txrx_soc_handle soc, uint8_t vdev_id,
- ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
+ ol_txrx_tx_flow_control_fp flowcontrol, void *osif_fc_ctx,
ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause)
{
if (!soc || !soc->ops) {
@@ -124,7 +126,7 @@
return 0;
return soc->ops->l_flowctl_ops->register_tx_flow_control(
- vdev_id, flowControl, osif_fc_ctx,
+ soc, vdev_id, flowcontrol, osif_fc_ctx,
flow_control_is_pause);
}
#else
@@ -160,16 +162,16 @@
return 0;
return soc->ops->l_flowctl_ops->deregister_tx_flow_control_cb(
- vdev_id);
+ soc, vdev_id);
}
/**
* cdp_fc_get_tx_resource() - get data path resource count
- * @soc - data path soc handle
- * @pdev - datapath pdev instance
- * @peer_addr - peer mac address
- * @low_watermark - low resource threshold
- * @high_watermark_offset - high resource threshold
+ * @soc: data path soc handle
+ * @pdev_id: datapath pdev ID
+ * @peer_addr: peer mac address
+ * @low_watermark: low resource threshold
+ * @high_watermark_offset: high resource threshold
*
* get data path resource count
*
@@ -177,7 +179,7 @@
* false resource is not avaialbe
*/
static inline bool
-cdp_fc_get_tx_resource(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
+cdp_fc_get_tx_resource(ol_txrx_soc_handle soc, uint8_t pdev_id,
struct qdf_mac_addr peer_addr,
unsigned int low_watermark,
unsigned int high_watermark_offset)
@@ -193,8 +195,9 @@
!soc->ops->l_flowctl_ops->get_tx_resource)
return false;
- return soc->ops->l_flowctl_ops->get_tx_resource(pdev, peer_addr,
- low_watermark, high_watermark_offset);
+ return soc->ops->l_flowctl_ops->get_tx_resource(soc, pdev_id, peer_addr,
+ low_watermark,
+ high_watermark_offset);
}
/**
@@ -223,21 +226,21 @@
return 0;
return soc->ops->l_flowctl_ops->ll_set_tx_pause_q_depth(
- vdev_id, pause_q_depth);
+ soc, vdev_id, pause_q_depth);
}
/**
* cdp_fc_vdev_flush() - flush tx queue
- * @soc - data path soc handle
- * @vdev - virtual interface context pointer
+ * @soc: data path soc handle
+ * @vdev_id: id of vdev
*
* flush tx queue
*
* return None
*/
static inline void
-cdp_fc_vdev_flush(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
+cdp_fc_vdev_flush(ol_txrx_soc_handle soc, uint8_t vdev_id)
{
if (!soc || !soc->ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
@@ -250,22 +253,23 @@
!soc->ops->l_flowctl_ops->vdev_flush)
return;
- soc->ops->l_flowctl_ops->vdev_flush(vdev);
+ soc->ops->l_flowctl_ops->vdev_flush(soc, vdev_id);
}
/**
* cdp_fc_vdev_pause() - pause tx scheduler on vdev
- * @soc - data path soc handle
- * @vdev - virtual interface context pointer
- * @reason - pause reason
+ * @soc: data path soc handle
+ * @vdev_id: id of vdev
+ * @reason: pause reason
+ * @pause_type: type of pause
*
* pause tx scheduler on vdev
*
* return None
*/
static inline void
-cdp_fc_vdev_pause(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
- uint32_t reason, uint32_t pause_type)
+cdp_fc_vdev_pause(ol_txrx_soc_handle soc, uint8_t vdev_id,
+ uint32_t reason, uint32_t pause_type)
{
if (!soc || !soc->ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
@@ -278,22 +282,23 @@
!soc->ops->l_flowctl_ops->vdev_pause)
return;
- soc->ops->l_flowctl_ops->vdev_pause(vdev, reason, pause_type);
+ soc->ops->l_flowctl_ops->vdev_pause(soc, vdev_id, reason, pause_type);
}
/**
* cdp_fc_vdev_unpause() - resume tx scheduler on vdev
- * @soc - data path soc handle
- * @vdev - virtual interface context pointer
- * @reason - pause reason
+ * @soc: data path soc handle
+ * @vdev_id: id of vdev
+ * @reason: pause reason
+ * @pause_type: type of pause
*
* resume tx scheduler on vdev
*
* return None
*/
static inline void
-cdp_fc_vdev_unpause(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
- uint32_t reason, uint32_t pause_type)
+cdp_fc_vdev_unpause(ol_txrx_soc_handle soc, uint8_t vdev_id,
+ uint32_t reason, uint32_t pause_type)
{
if (!soc || !soc->ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
@@ -305,6 +310,7 @@
!soc->ops->l_flowctl_ops->vdev_unpause)
return;
- soc->ops->l_flowctl_ops->vdev_unpause(vdev, reason, pause_type);
+ soc->ops->l_flowctl_ops->vdev_unpause(soc, vdev_id, reason,
+ pause_type);
}
#endif /* _CDP_TXRX_FC_LEG_H_ */
diff --git a/dp/inc/cdp_txrx_ops.h b/dp/inc/cdp_txrx_ops.h
index 8dfb6b6..6151bc9 100644
--- a/dp/inc/cdp_txrx_ops.h
+++ b/dp/inc/cdp_txrx_ops.h
@@ -1263,39 +1263,50 @@
/**
* struct cdp_lflowctl_ops - mcl legacy flow control ops
- * @register_tx_flow_control:
- * @deregister_tx_flow_control_cb:
- * @flow_control_cb:
- * @get_tx_resource:
- * @ll_set_tx_pause_q_depth:
- * @vdev_flush:
- * @vdev_pause:
- * @vdev_unpause:
+ * @register_tx_flow_control: Register tx flow control callback
+ * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev
+ * @set_vdev_os_queue_status: Set vdev queue status
+ * @deregister_tx_flow_control_cb: Deregister tx flow control callback
+ * @flow_control_cb: Call osif flow control callback
+ * @get_tx_resource: Get tx resources and compare with watermark
+ * @ll_set_tx_pause_q_depth: Set pause queue depth
+ * @vdev_flush: Flush all packets on a particular vdev
+ * @vdev_pause: Pause a particular vdev
+ * @vdev_unpause: Unpause a particular vdev
+ *
+ * Function pointers for operations related to flow control
*/
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
- int (*register_tx_flow_control)(struct cdp_soc_t *soc,
+ int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl,
tx_pause_callback flowcontrol);
- int (*set_vdev_tx_desc_limit)(uint8_t vdev_id, uint8_t chan);
- int (*set_vdev_os_queue_status)(uint8_t vdev_id,
+ int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl,
+ uint8_t vdev_id, uint8_t chan);
+ int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl,
+ uint8_t vdev_id,
enum netif_action_type action);
#else
- int (*register_tx_flow_control)(uint8_t vdev_id,
+ int (*register_tx_flow_control)(
+ struct cdp_soc_t *soc_hdl,
+ uint8_t vdev_id,
ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
- int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
- void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
- bool (*get_tx_resource)(struct cdp_pdev *pdev,
- struct qdf_mac_addr peer_addr,
- unsigned int low_watermark,
- unsigned int high_watermark_offset);
- int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
- void (*vdev_flush)(struct cdp_vdev *vdev);
- void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason,
- uint32_t pause_type);
- void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason,
- uint32_t pause_type);
+ int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl,
+ uint8_t vdev_id);
+ void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+ bool tx_resume);
+ bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
+ struct qdf_mac_addr peer_addr,
+ unsigned int low_watermark,
+ unsigned int high_watermark_offset);
+ int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id,
+ int pause_q_depth);
+ void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
+ void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+ uint32_t reason, uint32_t pause_type);
+ void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+ uint32_t reason, uint32_t pause_type);
};
/**