qcacld-3.0: Add txrx apis for High Latency systems (Part 2 - HL Datapath)
Add tx scheduler module, tx classify module within the data SW,
tx frame queue logging, group credit support and
the send/receive tx frame path for HL systems.
CRs-Fixed: 975526
Change-Id: If1655d4d832f88e565ab946ef9e9719f256ab7b1
diff --git a/core/dp/ol/inc/ol_txrx_ctrl_api.h b/core/dp/ol/inc/ol_txrx_ctrl_api.h
index cb937ba..e0a3a17 100644
--- a/core/dp/ol/inc/ol_txrx_ctrl_api.h
+++ b/core/dp/ol/inc/ol_txrx_ctrl_api.h
@@ -68,6 +68,10 @@
#define WLAN_HDD_NETIF_OPER_HISTORY 4
#define WLAN_DUMP_TX_FLOW_POOL_INFO 5
#define WLAN_TXRX_DESC_STATS 6
+#define WLAN_SCHEDULER_STATS 21
+#define WLAN_TX_QUEUE_STATS 22
+#define WLAN_BUNDLE_STATS 23
+#define WLAN_CREDIT_STATS 24
/**
* @brief Set up the data SW subsystem.
@@ -156,6 +160,7 @@
struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC];
};
+#if defined(CONFIG_HL_SUPPORT)
/**
* @brief Set parameters of WMM scheduler per AC settings.
* @details
@@ -164,23 +169,9 @@
* @param data_pdev - the physical device
* @param wmm_param - the wmm parameters
*/
-#define ol_txrx_set_wmm_param(data_pdev, wmm_param) /* no-op */
-
-/**
- * @brief notify tx data SW that a peer's transmissions are suspended.
- * @details
- * This function applies only to HL systems - in LL systems, tx flow control
- * is handled entirely within the target FW.
- * The HL host tx data SW is doing tx classification and tx download
- * scheduling, and therefore also needs to actively participate in tx
- * flow control. Specifically, the HL tx data SW needs to check whether a
- * given peer is available to transmit to, or is paused.
- * This function is used to tell the HL tx data SW when a peer is paused,
- * so the host tx data SW can hold the tx frames for that SW.
- *
- * @param data_peer - which peer is being paused
- */
-#define ol_txrx_peer_pause(data_peer) /* no-op */
+void
+ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+ struct ol_tx_wmm_param_t wmm_param);
/**
* @brief notify tx data SW that a peer-TID is ready to transmit to.
@@ -204,7 +195,9 @@
* @param tid - which TID within the peer is being unpaused, or -1 as a
* wildcard to unpause all TIDs within the peer
*/
-#define ol_txrx_peer_tid_unpause(data_peer, tid) /* no-op */
+void
+ol_txrx_peer_tid_unpause(ol_txrx_peer_handle data_peer, int tid);
+
/**
* @brief Tell a paused peer to release a specified number of tx frames.
@@ -230,8 +223,9 @@
* @param max_frms - limit on the number of tx frames to release from the
* specified TID's queues within the specified peer
*/
-#define ol_txrx_tx_release(peer, tid_mask, max_frms) /* no-op */
-
+void ol_txrx_tx_release(ol_txrx_peer_handle peer,
+ u_int32_t tid_mask,
+ int max_frms);
/**
* @brief Suspend all tx data per thermal event/timer for the
@@ -240,7 +234,9 @@
* This function applies only to HL systems, and it makes pause and
* unpause operations happen in pairs.
*/
-#define ol_txrx_throttle_pause(data_pdev) /* no-op */
+void
+ol_txrx_throttle_pause(ol_txrx_pdev_handle data_pdev);
+
/**
* @brief Resume all tx data per thermal event/timer for the
@@ -249,7 +245,64 @@
* This function applies only to HL systems, and it makes pause and
* unpause operations happen in pairs.
*/
-#define ol_txrx_throttle_unpause(data_pdev) /* no-op */
+void
+ol_txrx_throttle_unpause(ol_txrx_pdev_handle data_pdev);
+
+#else
+
+static inline
+void ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+ struct ol_tx_wmm_param_t wmm_param)
+{
+ return;
+}
+
+static inline void
+ol_txrx_peer_tid_unpause(ol_txrx_peer_handle data_peer, int tid)
+{
+ return;
+}
+
+static inline void
+ol_txrx_tx_release(ol_txrx_peer_handle peer,
+ u_int32_t tid_mask,
+ int max_frms)
+{
+ return;
+}
+
+static inline void
+ol_txrx_throttle_pause(ol_txrx_pdev_handle data_pdev)
+{
+ return;
+}
+
+static inline void
+ol_txrx_throttle_unpause(ol_txrx_pdev_handle data_pdev)
+{
+ return;
+}
+
+#endif /* CONFIG_HL_SUPPORT */
+
+/**
+ * @brief notify tx data SW that a peer's transmissions are suspended.
+ * @details
+ * This function applies only to HL systems - in LL systems, tx flow control
+ * is handled entirely within the target FW.
+ * The HL host tx data SW is doing tx classification and tx download
+ * scheduling, and therefore also needs to actively participate in tx
+ * flow control. Specifically, the HL tx data SW needs to check whether a
+ * given peer is available to transmit to, or is paused.
+ * This function is used to tell the HL tx data SW when a peer is paused,
+ * so the host tx data SW can hold the tx frames for that peer.
+ *
+ * @param data_peer - which peer is being paused
+ */
+static inline void ol_txrx_peer_pause(struct ol_txrx_peer_t *data_peer)
+{
+ return;
+}
/**
* @brief Suspend all tx data for the specified physical device.
@@ -263,7 +316,9 @@
*
* @param data_pdev - the physical device being paused
*/
-#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
+ defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
+
void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason);
#else
static inline
@@ -281,7 +336,9 @@
*
* @param data_pdev - the physical device being unpaused
*/
-#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
+ defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
+
void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason);
#else
static inline
@@ -488,6 +545,58 @@
*/
#define QCA_TX_DELAY_HIST_REPORT_BINS 6
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+
+/**
+ * @brief Configure the bad peer tx limit setting.
+ * @details
+ *
+ * @param pdev - the physical device
+ * @param enable - enable/disable the bad peer tx limit
+ * @param period - balance period for the tx limit check
+ * @param txq_limit - tx queue depth limit for a flagged peer
+ */
+void
+ol_txrx_bad_peer_txctl_set_setting(
+ struct ol_txrx_pdev_t *pdev,
+ int enable,
+ int period,
+ int txq_limit);
+
+/**
+ * @brief Configure the bad peer tx threshold limit
+ * @details
+ *
+ * @param pdev - the physical device
+ * @param level - throughput level being configured
+ * @param tput_thresh - throughput threshold for this level
+ * @param tx_limit - tx frame limit applied at this level
+ */
+void
+ol_txrx_bad_peer_txctl_update_threshold(
+ struct ol_txrx_pdev_t *pdev,
+ int level,
+ int tput_thresh,
+ int tx_limit);
+
+#else
+
+static inline void
+ol_txrx_bad_peer_txctl_set_setting(
+ struct ol_txrx_pdev_t *pdev,
+ int enable,
+ int period,
+ int txq_limit)
+{
+ return;
+}
+
+static inline void
+ol_txrx_bad_peer_txctl_update_threshold(
+ struct ol_txrx_pdev_t *pdev,
+ int level,
+ int tput_thresh,
+ int tx_limit)
+{
+ return;
+}
+#endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
+
void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
struct ol_txrx_peer_t *peer);
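
A note on usage: the two bad-peer declarations above are plain configuration setters. A minimal sketch of how a control-path caller might program them at attach time; the wrapper name and the numeric values are illustrative assumptions, not part of this change:

    /* illustrative values only */
    static void example_bad_peer_txctl_setup(struct ol_txrx_pdev_t *pdev)
    {
    	/* enable bad-peer policing, re-check each period, and cap a
    	 * flagged peer's tx queue at 100 frames */
    	ol_txrx_bad_peer_txctl_set_setting(pdev, 1, 500, 100);
    	/* level 0: below throughput threshold 1, limit tx to 10 frames */
    	ol_txrx_bad_peer_txctl_update_threshold(pdev, 0, 1, 10);
    }
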
diff --git a/core/dp/ol/inc/ol_txrx_dbg.h b/core/dp/ol/inc/ol_txrx_dbg.h
index 745e24f..8e54703 100644
--- a/core/dp/ol/inc/ol_txrx_dbg.h
+++ b/core/dp/ol/inc/ol_txrx_dbg.h
@@ -134,7 +134,26 @@
/* uncomment this to enable the tx queue log feature */
/* #define ENABLE_TX_QUEUE_LOG 1 */
-#define ol_tx_queue_log_display(pdev)
+#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
+
+void
+ol_tx_queue_log_display(ol_txrx_pdev_handle pdev);
+void ol_tx_queue_log_clear(ol_txrx_pdev_handle pdev);
+#else
+
+static inline void
+ol_tx_queue_log_display(ol_txrx_pdev_handle pdev)
+{
+ return;
+}
+
+static inline
+void ol_tx_queue_log_clear(ol_txrx_pdev_handle pdev)
+{
+ return;
+}
+#endif /* defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) */
+
/*----------------------------------------*/
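
The pattern above (real prototypes under the feature guard, empty static inlines otherwise) keeps call sites free of #ifdefs. A minimal sketch of a caller, with a hypothetical helper name:

    static void example_dump_tx_queue_log(ol_txrx_pdev_handle pdev)
    {
    	/* both calls compile to no-ops unless DEBUG_HL_LOGGING and
    	 * CONFIG_HL_SUPPORT are defined */
    	ol_tx_queue_log_display(pdev);
    	ol_tx_queue_log_clear(pdev);
    }
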
diff --git a/core/dp/txrx/ol_cfg.c b/core/dp/txrx/ol_cfg.c
index 60adb2a..1b4a591 100644
--- a/core/dp/txrx/ol_cfg.c
+++ b/core/dp/txrx/ol_cfg.c
@@ -57,6 +57,36 @@
}
#endif
+#ifdef CONFIG_HL_SUPPORT
+
+/**
+ * ol_pdev_cfg_param_update() - assign the tx frame download size for the
+ * txrx pdev, used across the datapath
+ * @cfg_ctx: ptr to config parameter for txrx pdev
+ *
+ * Return: None
+ */
+static inline
+void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
+{
+ cfg_ctx->is_high_latency = 1;
+ /* 802.1Q and SNAP / LLC headers are accounted for elsewhere */
+ cfg_ctx->tx_download_size = 1500;
+ cfg_ctx->tx_free_at_download = 0;
+}
+#else
+
+static inline
+void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
+{
+ /*
+ * Need to change HTT_LL_TX_HDR_SIZE_IP accordingly.
+ * Include payload, up to the end of UDP header for IPv4 case
+ */
+ cfg_ctx->tx_download_size = 16;
+}
+#endif
+
#if CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK
static inline
uint8_t ol_defrag_timeout_check(void)
@@ -86,6 +116,7 @@
*
* Return: the control device object
*/
+
ol_pdev_handle ol_pdev_cfg_attach(qdf_device_t osdev,
struct txrx_pdev_cfg_param_t cfg_param)
{
@@ -97,11 +128,8 @@
return NULL;
}
- /*
- * Need to change HTT_LL_TX_HDR_SIZE_IP accordingly.
- * Include payload, up to the end of UDP header for IPv4 case
- */
- cfg_ctx->tx_download_size = 16;
+ ol_pdev_cfg_param_update(cfg_ctx);
+
/* temporarily disabled PN check for Riva/Pronto */
cfg_ctx->rx_pn_check = 1;
cfg_ctx->defrag_timeout_check = ol_defrag_timeout_check();
@@ -248,6 +276,21 @@
return cfg->tx_free_at_download;
}
+void ol_cfg_set_tx_free_at_download(ol_pdev_handle pdev)
+{
+ struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+ cfg->tx_free_at_download = 1;
+}
+
+
+#ifdef CONFIG_HL_SUPPORT
+uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
+{
+ struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+ return cfg->target_tx_credit;
+}
+#else
+
uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
{
struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
@@ -259,6 +302,7 @@
return rc;
}
+#endif
int ol_cfg_tx_download_size(ol_pdev_handle pdev)
{
@@ -326,6 +370,7 @@
struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
return cfg->tx_flow_start_queue_offset;
}
+
#endif
#ifdef IPA_OFFLOAD
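
ol_pdev_cfg_param_update() is now the only HL/LL fork in ol_pdev_cfg_attach(): HL builds mark the pdev high-latency and download the full 1500-byte MSDU, while LL builds keep downloading only the first 16 bytes (through the end of the UDP header for IPv4). A sketch of reading the value back via the existing accessor; the wrapper name is illustrative:

    static int example_download_bytes(ol_pdev_handle pdev)
    {
    	/* 1500 on CONFIG_HL_SUPPORT builds, 16 otherwise */
    	return ol_cfg_tx_download_size(pdev);
    }
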
diff --git a/core/dp/txrx/ol_rx.c b/core/dp/txrx/ol_rx.c
index e1f65ce..52b894a 100644
--- a/core/dp/txrx/ol_rx.c
+++ b/core/dp/txrx/ol_rx.c
@@ -1019,6 +1019,10 @@
qdf_nbuf_t next = qdf_nbuf_next(msdu);
rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);
+ /* for HL, point to payload right now */
+ if (pdev->cfg.is_high_latency)
+ qdf_nbuf_pull_head(msdu,
+ htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc));
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
info.is_msdu_cmpl_mpdu =
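
In HL systems the HTT rx descriptor sits in-line at the head of each rx netbuf, so the pull above leaves qdf_nbuf_data() pointing at the frame payload. The same idiom, condensed into a sketch with an illustrative helper name:

    static void example_hl_strip_rx_desc(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
    {
    	void *rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, msdu);
    	/* skip the in-line HL rx descriptor */
    	qdf_nbuf_pull_head(msdu,
    			   htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc));
    }
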
diff --git a/core/dp/txrx/ol_rx_defrag.c b/core/dp/txrx/ol_rx_defrag.c
index 11b9f5a..74aaec2 100644
--- a/core/dp/txrx/ol_rx_defrag.c
+++ b/core/dp/txrx/ol_rx_defrag.c
@@ -104,16 +104,178 @@
0,
};
-inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * ol_rx_frag_get_mac_hdr() - retrieve mac header
+ * @htt_pdev: pointer to htt pdev handle
+ * @frag: rx fragment
+ *
+ * Return: pointer to ieee mac header of frag
+ */
+static struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
+ htt_pdev_handle htt_pdev, qdf_nbuf_t frag)
+{
+ void *rx_desc;
+ int rx_desc_len;
+
+ rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
+ rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
+ return (struct ieee80211_frame *)(qdf_nbuf_data(frag) + rx_desc_len);
+}
+
+/**
+ * ol_rx_frag_pull_hdr() - point to payload of rx frag
+ * @htt_pdev: pointer to htt pdev handle
+ * @frag: rx fragment
+ * @hdrsize: header size
+ *
+ * Return: None
+ */
+static void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
+ qdf_nbuf_t frag, int hdrsize)
+{
+ void *rx_desc;
+ int rx_desc_len;
+
+ rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
+ rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
+ qdf_nbuf_pull_head(frag, rx_desc_len + hdrsize);
+}
+
+/**
+ * ol_rx_frag_clone() - clone the rx frag
+ * @frag: rx fragment to clone from
+ *
+ * Return: cloned buffer
+ */
+static inline qdf_nbuf_t
+ol_rx_frag_clone(qdf_nbuf_t frag)
+{
+ return qdf_nbuf_clone(frag);
+}
+
+/**
+ * ol_rx_frag_desc_adjust() - adjust rx frag descriptor position
+ * @pdev: pointer to txrx handle
+ * @msdu: msdu
+ * @rx_desc_old_position: rx descriptor old position
+ * @ind_old_position:index of old position
+ * @rx_desc_len: rx descriptor length
+ *
+ * Return: None
+ */
+static void
+ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
+ qdf_nbuf_t msdu,
+ void **rx_desc_old_position,
+ void **ind_old_position, int *rx_desc_len)
+{
+ *rx_desc_old_position = htt_rx_msdu_desc_retrieve(pdev->htt_pdev,
+ msdu);
+ *ind_old_position = *rx_desc_old_position - HTT_RX_IND_HL_BYTES;
+ *rx_desc_len = htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev,
+ *rx_desc_old_position);
+}
+
+/**
+ * ol_rx_frag_restructure() - point to payload for HL
+ * @pdev: physical device object
+ * @msdu: the buffer containing the MSDU payload
+ * @rx_desc_old_position: rx MSDU descriptor
+ * @ind_old_position: rx msdu indication
+ * @f_type: pointing to rx defrag cipher
+ * @rx_desc_len: length by which rx descriptor to move
+ *
+ * Return: None
+ */
+static void
+ol_rx_frag_restructure(
+ ol_txrx_pdev_handle pdev,
+ qdf_nbuf_t msdu,
+ void *rx_desc_old_position,
+ void *ind_old_position,
+ const struct ol_rx_defrag_cipher *f_type,
+ int rx_desc_len)
+{
+ if ((ind_old_position == NULL) || (rx_desc_old_position == NULL)) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "ind_old_position,rx_desc_old_position is NULL\n");
+ ASSERT(0);
+ return;
+ }
+ /* move the rx descriptor */
+ qdf_mem_move(rx_desc_old_position + f_type->ic_header,
+ rx_desc_old_position, rx_desc_len);
+ /* move the rx indication */
+ qdf_mem_move(ind_old_position + f_type->ic_header, ind_old_position,
+ HTT_RX_IND_HL_BYTES);
+}
+
+/**
+ * ol_rx_get_desc_len() - get the HL rx descriptor size
+ * @htt_pdev: the HTT instance the rx data was received on
+ * @wbuf: buffer containing the MSDU payload
+ * @rx_desc_old_position: rx MSDU descriptor
+ *
+ * Return: Return the HL rx desc size
+ */
+static
+int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
+ qdf_nbuf_t wbuf,
+ void **rx_desc_old_position)
+{
+ int rx_desc_len = 0;
+ *rx_desc_old_position = htt_rx_msdu_desc_retrieve(htt_pdev, wbuf);
+ rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev,
+ *rx_desc_old_position);
+
+ return rx_desc_len;
+}
+
+/**
+ * ol_rx_defrag_push_rx_desc() - restore the rx descriptor ahead of the payload
+ * @nbuf: buffer containing the MSDU payload
+ * @rx_desc_old_position: rx MSDU descriptor
+ * @ind_old_position: rx msdu indication
+ * @rx_desc_len: HL rx desc size
+ *
+ * Return: None
+ */
+static
+void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
+ void *rx_desc_old_position,
+ void *ind_old_position,
+ int rx_desc_len)
+{
+ qdf_nbuf_push_head(nbuf, rx_desc_len);
+ qdf_mem_move(
+ qdf_nbuf_data(nbuf), rx_desc_old_position, rx_desc_len);
+ qdf_mem_move(
+ qdf_nbuf_data(nbuf) - HTT_RX_IND_HL_BYTES, ind_old_position,
+ HTT_RX_IND_HL_BYTES);
+}
+#else
+
+static inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
htt_pdev_handle htt_pdev,
qdf_nbuf_t frag)
{
return
(struct ieee80211_frame *) qdf_nbuf_data(frag);
}
-#define ol_rx_frag_pull_hdr(pdev, frag, hdrsize) \
+
+static inline void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
+ qdf_nbuf_t frag, int hdrsize)
+{
qdf_nbuf_pull_head(frag, hdrsize);
-#define OL_RX_FRAG_CLONE(frag) NULL /* no-op */
+}
+
+static inline qdf_nbuf_t
+ol_rx_frag_clone(qdf_nbuf_t frag)
+{
+ return NULL;
+}
static inline void
ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
@@ -126,6 +288,38 @@
*rx_desc_len = 0;
}
+static inline void
+ol_rx_frag_restructure(
+ ol_txrx_pdev_handle pdev,
+ qdf_nbuf_t msdu,
+ void *rx_desc_old_position,
+ void *ind_old_position,
+ const struct ol_rx_defrag_cipher *f_type,
+ int rx_desc_len)
+{
+ /* no op */
+ return;
+}
+
+static inline
+int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
+ qdf_nbuf_t wbuf,
+ void **rx_desc_old_position)
+{
+ return 0;
+}
+
+static inline
+void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
+ void *rx_desc_old_position,
+ void *ind_old_position,
+ int rx_desc_len)
+{
+ return;
+}
+#endif /* CONFIG_HL_SUPPORT */
+
+
/*
* Process incoming fragments
*/
@@ -302,7 +496,7 @@
qdf_nbuf_t frag_clone;
qdf_assert(frag);
- frag_clone = OL_RX_FRAG_CLONE(frag);
+ frag_clone = ol_rx_frag_clone(frag);
frag = frag_clone ? frag_clone : frag;
mac_hdr = (struct ieee80211_frame *)
@@ -608,6 +802,13 @@
return OL_RX_DEFRAG_ERR;
qdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen);
+ ol_rx_frag_restructure(
+ pdev,
+ msdu,
+ rx_desc_old_position,
+ ind_old_position,
+ &f_tkip,
+ rx_desc_len);
qdf_nbuf_pull_head(msdu, f_tkip.ic_header);
qdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
return OL_RX_DEFRAG_OK;
@@ -630,6 +831,13 @@
&ind_old_position, &rx_desc_len);
origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
qdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen);
+ ol_rx_frag_restructure(
+ pdev,
+ msdu,
+ rx_desc_old_position,
+ ind_old_position,
+ &f_wep,
+ rx_desc_len);
qdf_nbuf_pull_head(msdu, f_wep.ic_header);
qdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
return OL_RX_DEFRAG_OK;
@@ -694,6 +902,13 @@
return OL_RX_DEFRAG_ERR;
qdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen);
+ ol_rx_frag_restructure(
+ pdev,
+ nbuf,
+ rx_desc_old_position,
+ ind_old_position,
+ &f_ccmp,
+ rx_desc_len);
qdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);
return OL_RX_DEFRAG_OK;
@@ -788,6 +1003,7 @@
void *rx_desc_old_position = NULL;
void *ind_old_position = NULL;
int rx_desc_len = 0;
+ htt_pdev_handle htt_pdev = pdev->htt_pdev;
ol_rx_frag_desc_adjust(pdev,
wbuf,
@@ -831,7 +1047,8 @@
if (wbuf == NULL)
return OL_RX_DEFRAG_ERR;
- rx_desc_len = 0;
+ rx_desc_len = ol_rx_get_desc_len(htt_pdev, wbuf,
+ &rx_desc_old_position);
if (space != 0) {
const uint8_t *data_next;
@@ -1008,6 +1225,9 @@
qdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
sizeof(llchdr.ethertype));
+
+ ol_rx_defrag_push_rx_desc(msdu, rx_desc_old_position,
+ ind_old_position, rx_desc_len);
}
/*
@@ -1058,5 +1278,8 @@
if (wh)
wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;
+ ol_rx_defrag_push_rx_desc(nbuf, rx_desc_old_position,
+ ind_old_position, rx_desc_len);
+
}
}
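
All three decap paths (TKIP, WEP, CCMP) now share one HL sequence: locate the in-line rx descriptor, strip the security header, shift the descriptor and rx indication forward by the stripped size, then pull the head. Condensed from the TKIP hunk above (on LL builds the adjust/restructure steps collapse to no-ops):

    ol_rx_frag_desc_adjust(pdev, msdu, &rx_desc_old_position,
    		       &ind_old_position, &rx_desc_len);
    origHdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
    qdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen);
    ol_rx_frag_restructure(pdev, msdu, rx_desc_old_position,
    		       ind_old_position, &f_tkip, rx_desc_len);
    qdf_nbuf_pull_head(msdu, f_tkip.ic_header);
    qdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
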
diff --git a/core/dp/txrx/ol_rx_fwd.c b/core/dp/txrx/ol_rx_fwd.c
index 7d33ae3..589f9d2 100644
--- a/core/dp/txrx/ol_rx_fwd.c
+++ b/core/dp/txrx/ol_rx_fwd.c
@@ -121,7 +121,18 @@
*/
qdf_nbuf_set_next(msdu, NULL); /* add NULL terminator */
- msdu = OL_TX_LL(vdev, msdu);
+ /* for HL, point to payload before send to tx again.*/
+ if (pdev->cfg.is_high_latency) {
+ void *rx_desc;
+
+ rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev,
+ msdu);
+ qdf_nbuf_pull_head(msdu,
+ htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev,
+ rx_desc));
+ }
+
+ msdu = OL_TX_SEND(vdev, msdu);
if (msdu) {
/*
@@ -131,6 +142,7 @@
*/
qdf_nbuf_tx_free(msdu, QDF_NBUF_PKT_ERROR);
}
+ return;
}
void
@@ -232,6 +244,7 @@
ol_rx_deliver(vdev, peer, tid, deliver_list_head);
}
}
+ return;
}
/*
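
OL_TX_SEND (introduced in ol_tx.h below) keeps the intra-BSS forwarding code path-agnostic: it resolves to ol_tx_hl() on HL builds and to the legacy OL_TX_LL path otherwise, and either way returns the frames that were not accepted. A condensed sketch of the contract used above:

    qdf_nbuf_set_next(msdu, NULL);	/* single-frame list */
    msdu = OL_TX_SEND(vdev, msdu);	/* NULL means accepted */
    if (msdu)
    	qdf_nbuf_tx_free(msdu, QDF_NBUF_PKT_ERROR);
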
diff --git a/core/dp/txrx/ol_tx.c b/core/dp/txrx/ol_tx.c
index 789dae3..c2c79fb 100644
--- a/core/dp/txrx/ol_tx.c
+++ b/core/dp/txrx/ol_tx.c
@@ -41,7 +41,10 @@
#include <ol_txrx.h>
/* internal header files relevant only for HL systems */
+#include <ol_tx_classify.h> /* ol_tx_classify, ol_tx_classify_mgmt */
#include <ol_tx_queue.h> /* ol_tx_enqueue */
+#include <ol_tx_sched.h> /* ol_tx_sched */
+
/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h> /* OL_TX_ENCAP, etc */
@@ -165,7 +168,7 @@
/* Terminate the (single-element) list of tx frames */
qdf_nbuf_set_next(skb, NULL);
- ret = OL_TX_LL(vdev, skb);
+ ret = OL_TX_SEND(vdev, skb);
if (ret) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
"%s: Failed to tx", __func__);
@@ -202,7 +205,7 @@
/* Terminate the (single-element) list of tx frames */
qdf_nbuf_set_next(skb, NULL);
- ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
+ ret = OL_TX_SEND((struct ol_txrx_vdev_t *)vdev, skb);
if (ret) {
TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
"%s: Failed to tx", __func__);
@@ -1041,7 +1044,8 @@
qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
- enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
+ enum ol_tx_spec tx_spec,
+ qdf_nbuf_t msdu_list)
{
qdf_nbuf_t msdu = msdu_list;
htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
@@ -1082,14 +1086,14 @@
uint8_t sub_type =
ol_txrx_tx_raw_subtype(tx_spec);
htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
- htt_pkt_type_native_wifi,
- sub_type);
+ htt_pkt_type_native_wifi,
+ sub_type);
} else if (ol_txrx_tx_is_raw(tx_spec)) {
/* different types of raw frames */
uint8_t sub_type =
ol_txrx_tx_raw_subtype(tx_spec);
htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
- htt_pkt_type_raw, sub_type);
+ htt_pkt_type_raw, sub_type);
}
}
/*
@@ -1125,9 +1129,10 @@
/**
* parse_ocb_tx_header() - Function to check for OCB
- * TX control header on a packet and extract it if present
- *
* @msdu: Pointer to OS packet (qdf_nbuf_t)
+ * @tx_ctrl: output TX control header, filled in if one is present
+ *
+ * Return: true if ocb parsing is successful
*/
#define OCB_HEADER_VERSION 1
bool parse_ocb_tx_header(qdf_nbuf_t msdu,
@@ -1137,7 +1142,7 @@
struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;
/* Check if TX control header is present */
- eth_hdr_p = (struct ether_header *) qdf_nbuf_data(msdu);
+ eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
/* TX control header is not present. Nothing to do.. */
return true;
@@ -1146,12 +1151,12 @@
qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));
/* Parse the TX control header */
- tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *) qdf_nbuf_data(msdu);
+ tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);
if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
if (tx_ctrl)
qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
- sizeof(*tx_ctrl_hdr));
+ sizeof(*tx_ctrl_hdr));
} else {
/* The TX control header is invalid. */
return false;
@@ -1162,6 +1167,440 @@
return true;
}
+
+#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_TX_DESC_HI_PRIO_RESERVE)
+
+/**
+ * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
+ * for a HL system.
+ * @pdev: the data physical device sending the data
+ * @vdev: the virtual device sending the data
+ * @msdu: the tx frame
+ * @msdu_info: the tx meta data
+ *
+ * Return: the tx descriptor
+ */
+static inline
+struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ qdf_nbuf_t msdu,
+ struct ol_txrx_msdu_info_t *msdu_info)
+{
+ struct ol_tx_desc_t *tx_desc = NULL;
+
+ if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
+ TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
+ tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+ } else if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
+ if ((qdf_nbuf_is_ipv4_dhcp_pkt(msdu) == true) ||
+ (qdf_nbuf_is_ipv4_eapol_pkt(msdu) == true)) {
+ tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "Provided tx descriptor from reserve pool for DHCP/EAPOL\n");
+ }
+ }
+ return tx_desc;
+}
+#else
+
+static inline
+struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ qdf_nbuf_t msdu,
+ struct ol_txrx_msdu_info_t *msdu_info)
+{
+ struct ol_tx_desc_t *tx_desc = NULL;
+ tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+ return tx_desc;
+}
+#endif
+
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
+ * for a management frame
+ * @pdev: the data physical device sending the data
+ * @vdev: the virtual device sending the data
+ * @tx_mgmt_frm: the tx management frame
+ * @tx_msdu_info: the tx meta data
+ *
+ * Return: the tx descriptor
+ */
+static inline
+struct ol_tx_desc_t *
+ol_txrx_mgmt_tx_desc_alloc(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ qdf_nbuf_t tx_mgmt_frm,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ struct ol_tx_desc_t *tx_desc;
+ tx_msdu_info->htt.action.tx_comp_req = 1;
+ tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
+ return tx_desc;
+}
+
+/**
+ * ol_txrx_mgmt_send_frame() - send a management frame
+ * @vdev: virtual device sending the frame
+ * @tx_desc: tx desc
+ * @tx_mgmt_frm: management frame to send
+ * @tx_msdu_info: the tx meta data
+ * @chanfreq: channel frequency for the frame
+ *
+ * Return:
+ * 0 -> the frame is accepted for transmission, -OR-
+ * 1 -> the frame was not accepted
+ */
+static inline
+int ol_txrx_mgmt_send_frame(
+ struct ol_txrx_vdev_t *vdev,
+ struct ol_tx_desc_t *tx_desc,
+ qdf_nbuf_t tx_mgmt_frm,
+ struct ol_txrx_msdu_info_t *tx_msdu_info,
+ uint16_t chanfreq)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ struct ol_tx_frms_queue_t *txq;
+ /*
+ * 1. Look up the peer and queue the frame in the peer's mgmt queue.
+ * 2. Invoke the download scheduler.
+ */
+ txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
+ if (!txq) {
+ /*TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
+ msdu);*/
+ qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
+ ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
+ 1 /* error */);
+ if (tx_msdu_info->peer) {
+ /* remove the peer reference added above */
+ ol_txrx_peer_unref_delete(tx_msdu_info->peer);
+ }
+ return 1; /* can't accept the tx mgmt frame */
+ }
+ /* Initialize the HTT tx desc l2 header offset field.
+ * Even though tx encap does not apply to mgmt frames,
+ * htt_tx_desc_mpdu_header still needs to be called,
+ * to specify that there was no L2 header added by tx encap,
+ * so the frame's length does not need to be adjusted to account for
+ * an added L2 header.
+ */
+ htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
+ htt_tx_desc_init(
+ pdev->htt_pdev, tx_desc->htt_tx_desc,
+ tx_desc->htt_tx_desc_paddr,
+ ol_tx_desc_id(pdev, tx_desc),
+ tx_mgmt_frm,
+ &tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0);
+ htt_tx_desc_display(tx_desc->htt_tx_desc);
+ htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
+
+ ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
+ if (tx_msdu_info->peer) {
+ /* remove the peer reference added above */
+ ol_txrx_peer_unref_delete(tx_msdu_info->peer);
+ }
+ ol_tx_sched(vdev->pdev);
+
+ return 0;
+}
+
+#else
+
+static inline
+struct ol_tx_desc_t *
+ol_txrx_mgmt_tx_desc_alloc(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ qdf_nbuf_t tx_mgmt_frm,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ struct ol_tx_desc_t *tx_desc;
+ /* For LL tx_comp_req is not used so initialized to 0 */
+ tx_msdu_info->htt.action.tx_comp_req = 0;
+ tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
+ /* FIX THIS -
+ * The FW currently has trouble using the host's fragments table
+ * for management frames. Until this is fixed, rather than
+ * specifying the fragment table to the FW, specify just the
+ * address of the initial fragment.
+ */
+#if defined(HELIUMPLUS_PADDR64)
+ /* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
+ tx_desc); */
+#endif /* defined(HELIUMPLUS_PADDR64) */
+ if (tx_desc) {
+ /*
+ * Following the call to ol_tx_desc_ll, frag 0 is the
+ * HTT tx HW descriptor, and the frame payload is in
+ * frag 1.
+ */
+ htt_tx_desc_frags_table_set(
+ pdev->htt_pdev,
+ tx_desc->htt_tx_desc,
+ qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
+ 0, 0);
+#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
+ dump_frag_desc(
+ "after htt_tx_desc_frags_table_set",
+ tx_desc);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+ }
+
+ return tx_desc;
+}
+
+static inline
+int ol_txrx_mgmt_send_frame(
+ struct ol_txrx_vdev_t *vdev,
+ struct ol_tx_desc_t *tx_desc,
+ qdf_nbuf_t tx_mgmt_frm,
+ struct ol_txrx_msdu_info_t *tx_msdu_info,
+ uint16_t chanfreq)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
+ QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
+ QDF_NBUF_TX_PKT_MGMT_TRACK;
+ ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
+ htt_pkt_type_mgmt);
+
+ return 0;
+}
+#endif
+
+/**
+ * ol_tx_hl_base() - send tx frames for a HL system.
+ * @vdev: the virtual device sending the data
+ * @tx_spec: indicate what non-standard transmission actions to apply
+ * @msdu_list: the tx frames to send
+ * @tx_comp_req: tx completion request flag
+ *
+ * Return: NULL if all MSDUs are accepted
+ */
+static inline qdf_nbuf_t
+ol_tx_hl_base(
+ ol_txrx_vdev_handle vdev,
+ enum ol_tx_spec tx_spec,
+ qdf_nbuf_t msdu_list,
+ int tx_comp_req)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ qdf_nbuf_t msdu = msdu_list;
+ struct ol_txrx_msdu_info_t tx_msdu_info;
+ struct ocb_tx_ctrl_hdr_t tx_ctrl;
+
+ htt_pdev_handle htt_pdev = pdev->htt_pdev;
+ tx_msdu_info.peer = NULL;
+ tx_msdu_info.tso_info.is_tso = 0;
+
+ /*
+ * The msdu_list variable could be used instead of the msdu var,
+ * but just to clarify which operations are done on a single MSDU
+ * vs. a list of MSDUs, use a distinct variable for single MSDUs
+ * within the list.
+ */
+ while (msdu) {
+ qdf_nbuf_t next;
+ struct ol_tx_frms_queue_t *txq;
+ struct ol_tx_desc_t *tx_desc = NULL;
+
+ qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));
+
+ /*
+ * The netbuf will get stored into a (peer-TID) tx queue list
+ * inside the ol_tx_classify_store function or else dropped,
+ * so store the next pointer immediately.
+ */
+ next = qdf_nbuf_next(msdu);
+
+ tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);
+
+ if (!tx_desc) {
+ /*
+ * If we're out of tx descs, there's no need to try
+ * to allocate tx descs for the remaining MSDUs.
+ */
+ TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
+ msdu);
+ return msdu; /* the list of unaccepted MSDUs */
+ }
+
+ /* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu);*/
+
+ if (tx_spec != OL_TX_SPEC_STD) {
+#if defined(FEATURE_WLAN_TDLS)
+ if (tx_spec & OL_TX_SPEC_NO_FREE) {
+ tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
+ } else if (tx_spec & OL_TX_SPEC_TSO) {
+#else
+ if (tx_spec & OL_TX_SPEC_TSO) {
+#endif
+ tx_desc->pkt_type = OL_TX_FRM_TSO;
+ }
+ if (ol_txrx_tx_is_raw(tx_spec)) {
+ /* CHECK THIS: does this need
+ * to happen after htt_tx_desc_init?
+ */
+ /* different types of raw frames */
+ u_int8_t sub_type =
+ ol_txrx_tx_raw_subtype(
+ tx_spec);
+ htt_tx_desc_type(htt_pdev,
+ tx_desc->htt_tx_desc,
+ htt_pkt_type_raw,
+ sub_type);
+ }
+ }
+
+ tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
+ tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
+ tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
+ tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;
+ tx_msdu_info.htt.action.tx_comp_req = tx_comp_req;
+
+ /* If the vdev is in OCB mode,
+ * parse the tx control header.
+ */
+ if (vdev->opmode == wlan_op_mode_ocb) {
+ if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
+ /* There was an error parsing
+ * the header. Skip this packet.
+ */
+ goto MSDU_LOOP_BOTTOM;
+ }
+ }
+
+ txq = ol_tx_classify(vdev, tx_desc, msdu,
+ &tx_msdu_info);
+
+ if ((!txq) || TX_FILTER_CHECK(&tx_msdu_info)) {
+ /* drop this frame,
+ * but try sending subsequent frames
+ */
+ /*TXRX_STATS_MSDU_LIST_INCR(pdev,
+ tx.dropped.no_txq,
+ msdu);*/
+ qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
+ ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
+ if (tx_msdu_info.peer) {
+ /* remove the peer reference
+ * added above */
+ ol_txrx_peer_unref_delete(
+ tx_msdu_info.peer);
+ }
+ goto MSDU_LOOP_BOTTOM;
+ }
+
+ if (tx_msdu_info.peer) {
+ /* If the state is not associated then drop all
+ * the data packets received for that peer */
+ if (tx_msdu_info.peer->state ==
+ OL_TXRX_PEER_STATE_DISC) {
+ qdf_atomic_inc(
+ &pdev->tx_queue.rsrc_cnt);
+ ol_tx_desc_frame_free_nonstd(pdev,
+ tx_desc,
+ 1);
+ ol_txrx_peer_unref_delete(
+ tx_msdu_info.peer);
+ msdu = next;
+ continue;
+ } else if (tx_msdu_info.peer->state !=
+ OL_TXRX_PEER_STATE_AUTH) {
+ if (tx_msdu_info.htt.info.ethertype !=
+ ETHERTYPE_PAE &&
+ tx_msdu_info.htt.info.ethertype
+ != ETHERTYPE_WAI) {
+ qdf_atomic_inc(
+ &pdev->tx_queue.
+ rsrc_cnt);
+ ol_tx_desc_frame_free_nonstd(
+ pdev,
+ tx_desc, 1);
+ ol_txrx_peer_unref_delete(
+ tx_msdu_info.peer);
+ msdu = next;
+ continue;
+ }
+ }
+ }
+ /*
+ * Initialize the HTT tx desc l2 header offset field.
+ * htt_tx_desc_mpdu_header needs to be called to
+ * make sure, the l2 header size is initialized
+ * correctly to handle cases where TX ENCAP is disabled
+ * or Tx Encap fails to perform Encap
+ */
+ htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
+
+ /*
+ * Note: when the driver is built without support for
+ * SW tx encap, the following macro is a no-op.
+ * When the driver is built with support for SW tx
+ * encap, it performs encap, and if an error is
+ * encountered, jumps to the MSDU_LOOP_BOTTOM label.
+ */
+ OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu,
+ tx_msdu_info);
+
+ /* initialize the HW tx descriptor */
+ htt_tx_desc_init(
+ pdev->htt_pdev, tx_desc->htt_tx_desc,
+ tx_desc->htt_tx_desc_paddr,
+ ol_tx_desc_id(pdev, tx_desc),
+ msdu,
+ &tx_msdu_info.htt,
+ &tx_msdu_info.tso_info,
+ &tx_ctrl,
+ vdev->opmode == wlan_op_mode_ocb);
+ /*
+ * If debug display is enabled, show the meta-data
+ * being downloaded to the target via the
+ * HTT tx descriptor.
+ */
+ htt_tx_desc_display(tx_desc->htt_tx_desc);
+
+ ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
+ if (tx_msdu_info.peer) {
+ OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer,
+ msdu);
+ /* remove the peer reference added above */
+ ol_txrx_peer_unref_delete(tx_msdu_info.peer);
+ }
+MSDU_LOOP_BOTTOM:
+ msdu = next;
+ }
+ ol_tx_sched(pdev);
+ return NULL; /* all MSDUs were accepted */
+}
+
+qdf_nbuf_t
+ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ int tx_comp_req = pdev->cfg.default_tx_comp_req;
+ return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
+}
+
+qdf_nbuf_t
+ol_tx_non_std_hl(ol_txrx_vdev_handle vdev,
+ enum ol_tx_spec tx_spec,
+ qdf_nbuf_t msdu_list)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ int tx_comp_req = pdev->cfg.default_tx_comp_req;
+
+ if (!tx_comp_req) {
+ if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
+ (pdev->tx_data_callback.func))
+ tx_comp_req = 1;
+ }
+ return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req);
+}
+
/**
* ol_tx_non_std - Allow the control-path SW to send data frames
*
@@ -1188,7 +1627,10 @@
ol_tx_non_std(ol_txrx_vdev_handle vdev,
enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
- return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
+ if (vdev->pdev->cfg.is_high_latency)
+ return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
+ else
+ return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
}
void
@@ -1297,7 +1739,7 @@
struct ol_txrx_pdev_t *pdev = vdev->pdev;
struct ol_tx_desc_t *tx_desc;
struct ol_txrx_msdu_info_t tx_msdu_info;
-
+ int result = 0;
tx_msdu_info.tso_info.is_tso = 0;
tx_msdu_info.htt.action.use_6mbps = use_6mbps;
@@ -1348,37 +1790,8 @@
tx_msdu_info.peer = NULL;
-
- /* For LL tx_comp_req is not used so initialized to 0 */
- tx_msdu_info.htt.action.tx_comp_req = 0;
- tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
- /* FIX THIS -
- * The FW currently has trouble using the host's fragments table
- * for management frames. Until this is fixed, rather than
- * specifying the fragment table to the FW, specify just the
- * address of the initial fragment.
- */
-#if defined(HELIUMPLUS_PADDR64)
- /* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
- tx_desc); */
-#endif /* defined(HELIUMPLUS_PADDR64) */
- if (tx_desc) {
- /*
- * Following the call to ol_tx_desc_ll, frag 0 is the
- * HTT tx HW descriptor, and the frame payload is in
- * frag 1.
- */
- htt_tx_desc_frags_table_set(
- pdev->htt_pdev,
- tx_desc->htt_tx_desc,
- qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
- 0, 0);
-#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
- dump_frag_desc(
- "after htt_tx_desc_frags_table_set",
- tx_desc);
-#endif /* defined(HELIUMPLUS_PADDR64) */
- }
+ tx_desc = ol_txrx_mgmt_tx_desc_alloc(pdev, vdev, tx_mgmt_frm,
+ &tx_msdu_info);
if (!tx_desc)
return -EINVAL; /* can't accept the tx mgmt frame */
@@ -1386,11 +1799,8 @@
TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
- htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
- QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
- QDF_NBUF_TX_PKT_MGMT_TRACK;
- ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
- htt_pkt_type_mgmt);
+ result = ol_txrx_mgmt_send_frame(vdev, tx_desc, tx_mgmt_frm,
+ &tx_msdu_info, chanfreq);
return 0; /* accepted the tx mgmt frame */
}
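
For reference, a sketch of driving the two new HL entry points; the wrapper names are illustrative, and the completion behavior follows ol_tx_hl() and ol_tx_non_std_hl() above:

    /* standard data tx: the completion policy comes from the pdev config */
    qdf_nbuf_t example_hl_xmit(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
    {
    	return ol_tx_hl(vdev, msdu_list);	/* NULL = all accepted */
    }

    /* non-standard tx: the caller keeps netbuf ownership, and a tx
     * completion is forced when a tx data callback is registered */
    qdf_nbuf_t example_hl_xmit_no_free(ol_txrx_vdev_handle vdev,
    				   qdf_nbuf_t msdu_list)
    {
    	return ol_tx_non_std_hl(vdev, OL_TX_SPEC_NO_FREE, msdu_list);
    }
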
diff --git a/core/dp/txrx/ol_tx.h b/core/dp/txrx/ol_tx.h
index d90e44d..561900a 100644
--- a/core/dp/txrx/ol_tx.h
+++ b/core/dp/txrx/ol_tx.h
@@ -47,6 +47,12 @@
qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
+#ifdef CONFIG_HL_SUPPORT
+#define OL_TX_SEND ol_tx_hl
+#else
+#define OL_TX_SEND OL_TX_LL
+#endif
+
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
#define OL_TX_LL ol_tx_ll_queue
#else
@@ -67,6 +73,29 @@
return;
}
#endif
+
+/**
+ * ol_tx_non_std_hl() - send non-standard tx frames.
+ * @data_vdev: the virtual device sending the data
+ * @tx_spec: indicate what non-standard transmission actions to apply
+ * @msdu_list: the tx frames to send
+ *
+ * Return: NULL if all MSDUs are accepted
+ */
+qdf_nbuf_t
+ol_tx_non_std_hl(ol_txrx_vdev_handle data_vdev,
+ enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
+
+/**
+ * ol_tx_hl() - transmit tx frames for a HL system.
+ * @vdev: the virtual device transmit the data
+ * @msdu_list: the tx frames to send
+ *
+ * Return: NULL if all MSDUs are accepted
+ */
+qdf_nbuf_t
+ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
+
qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle data_vdev,
enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
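
The OL_TX_SEND macro above selects the transmit path at compile time, so shared code (e.g. ol_rx_fwd.c) needs no runtime branch. Expansion sketch:

    msdu_list = OL_TX_SEND(vdev, msdu_list);
    /* CONFIG_HL_SUPPORT: -> ol_tx_hl(vdev, msdu_list)
     * otherwise:         -> OL_TX_LL(vdev, msdu_list), i.e. ol_tx_ll_queue()
     *                       under QCA_LL_LEGACY_TX_FLOW_CONTROL, else the
     *                       plain LL send */
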
diff --git a/core/dp/txrx/ol_tx_classify.c b/core/dp/txrx/ol_tx_classify.c
new file mode 100644
index 0000000..879b219
--- /dev/null
+++ b/core/dp/txrx/ol_tx_classify.c
@@ -0,0 +1,888 @@
+/*
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
+#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
+#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
+#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
+#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
+#include <ol_txrx.h>
+#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
+#include <ol_txrx_types.h> /* pdev stats */
+#include <ol_tx_desc.h> /* ol_tx_desc */
+#include <ol_tx_send.h> /* ol_tx_send */
+#include <ol_txrx_peer_find.h>
+#include <ol_tx_classify.h>
+#include <ol_tx_queue.h>
+#include <ipv4.h>
+#include <ipv6_defs.h>
+#include <ip_prot.h>
+#include <enet.h> /* ETHERTYPE_VLAN, etc. */
+#include <cds_ieee80211_common.h> /* ieee80211_frame */
+
+/*
+ * In theory, this tx classify code could be used on the host or in the target.
+ * Thus, this code uses generic OS primitives, that can be aliased to either
+ * the host's OS primitives or the target's OS primitives.
+ * For now, the following #defines set up these host-specific or
+ * target-specific aliases.
+ */
+
+#if defined(CONFIG_HL_SUPPORT)
+
+#define OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
+#define OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
+
+#ifdef QCA_TX_HTT2_SUPPORT
+static void
+ol_tx_classify_htt2_frm(
+ struct ol_txrx_vdev_t *vdev,
+ qdf_nbuf_t tx_nbuf,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ struct htt_msdu_info_t *htt = &tx_msdu_info->htt;
+ A_UINT8 candi_frm = 0;
+
+ /*
+ * Offload the frame re-order to L3 protocol and ONLY support
+ * TCP protocol now.
+ */
+ if ((htt->info.l2_hdr_type == htt_pkt_type_ethernet) &&
+ (htt->info.frame_type == htt_frm_type_data) &&
+ htt->info.is_unicast &&
+ (htt->info.ethertype == ETHERTYPE_IPV4)) {
+ struct ipv4_hdr_t *ipHdr;
+
+ ipHdr = (struct ipv4_hdr_t *)(qdf_nbuf_data(tx_nbuf) +
+ htt->info.l3_hdr_offset);
+ if (ipHdr->protocol == IP_PROTOCOL_TCP)
+ candi_frm = 1;
+ }
+
+ qdf_nbuf_set_tx_parallel_dnload_frm(tx_nbuf, candi_frm);
+}
+
+#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) \
+ ol_tx_classify_htt2_frm(vdev, netbuf, msdu_info);
+#else
+#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) /* no-op */
+#endif /* QCA_TX_HTT2_SUPPORT */
+/* DHCP goes with voice priority: WMM_AC_VO_TID1(); */
+#define TX_DHCP_TID 6
+
+#if defined(QCA_BAD_PEER_TX_FLOW_CL)
+static inline A_BOOL
+ol_if_tx_bad_peer_txq_overflow(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer,
+ struct ol_tx_frms_queue_t *txq)
+{
+ if (peer && pdev && txq && (peer->tx_limit_flag) &&
+ (txq->frms >= pdev->tx_peer_bal.peer_bal_txq_limit))
+ return true;
+ else
+ return false;
+}
+#else
+static inline A_BOOL ol_if_tx_bad_peer_txq_overflow(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer,
+ struct ol_tx_frms_queue_t *txq)
+{
+ return false;
+}
+#endif
+
+/* EAPOL goes with voice priority: WMM_AC_TO_TID1(WMM_AC_VO); */
+#define TX_EAPOL_TID 6
+
+/* ARP goes with voice priority: WMM_AC_TO_TID1(pdev->arp_ac_override) */
+#define TX_ARP_TID 6
+
+/* For non-IP case, use default TID */
+#define TX_DEFAULT_TID 0
+
+/*
+ * Determine the IP TOS priority.
+ * IP TOS format (refer to Pg 57 of WMM-test-plan-v1.2):
+ * IP-TOS - 8 bits: DSCP (6 bits), ECN (2 bits)
+ * DSCP - P2 P1 P0 X X X, where (P2 P1 P0) form the 802.1D priority
+ */
+static inline A_UINT8
+ol_tx_tid_by_ipv4(A_UINT8 *pkt)
+{
+ A_UINT8 ipPri, tid;
+ struct ipv4_hdr_t *ipHdr = (struct ipv4_hdr_t *)pkt;
+
+ ipPri = ipHdr->tos >> 5;
+ tid = ipPri & 0x7;
+
+ return tid;
+}
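
Worked example of the mapping above, with illustrative values:

    /* TOS 0xB8 (DSCP 46, Expedited Forwarding):
     *   ipPri = 0xB8 >> 5 = 5
     *   tid   = 5 & 0x7   = 5   (802.1D user priority 5)
     * TOS 0x00 (best effort) yields tid 0.
     */
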
+
+static inline A_UINT8
+ol_tx_tid_by_ipv6(A_UINT8 *pkt)
+{
+ return (ipv6_traffic_class((struct ipv6_hdr_t *)pkt) >> 5) & 0x7;
+}
+
+static inline void
+ol_tx_set_ether_type(
+ A_UINT8 *datap,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ A_UINT16 typeorlength;
+ A_UINT8 *ptr;
+ A_UINT8 *l3_data_ptr;
+
+ if (tx_msdu_info->htt.info.l2_hdr_type == htt_pkt_type_raw) {
+ /* adjust hdr_ptr to RA */
+ struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
+
+ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
+ IEEE80211_FC0_TYPE_DATA) {
+ struct llc_snap_hdr_t *llc;
+ /* dot11 encapsulated frame */
+ struct ieee80211_qosframe *whqos =
+ (struct ieee80211_qosframe *)datap;
+ if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
+ tx_msdu_info->htt.info.l3_hdr_offset =
+ sizeof(struct ieee80211_qosframe);
+ } else {
+ tx_msdu_info->htt.info.l3_hdr_offset =
+ sizeof(struct ieee80211_frame);
+ }
+ llc = (struct llc_snap_hdr_t *)
+ (datap + tx_msdu_info->htt.info.l3_hdr_offset);
+ tx_msdu_info->htt.info.ethertype =
+ (llc->ethertype[0] << 8) | llc->ethertype[1];
+ /*
+ * l3_hdr_offset refers to the end of the 802.3 or
+ * 802.11 header, which may be an LLC/SNAP header rather
+ * than the IP header.
+ * Thus, don't increment l3_hdr_offset by sizeof(*llc);
+ * rather, leave it as is.
+ */
+ } else {
+ /*
+ * This function should only be applied to data frames.
+ * For management frames, we already know to use
+ * HTT_TX_EXT_TID_MGMT.
+ */
+ TXRX_ASSERT2(0);
+ }
+ } else if (tx_msdu_info->htt.info.l2_hdr_type ==
+ htt_pkt_type_ethernet) {
+ ptr = (datap + ETHERNET_ADDR_LEN * 2);
+ typeorlength = (ptr[0] << 8) | ptr[1];
+ /*ETHERNET_HDR_LEN;*/
+ l3_data_ptr = datap + sizeof(struct ethernet_hdr_t);
+
+ if (typeorlength == ETHERTYPE_VLAN) {
+ ptr = (datap + ETHERNET_ADDR_LEN * 2
+ + ETHERTYPE_VLAN_LEN);
+ typeorlength = (ptr[0] << 8) | ptr[1];
+ l3_data_ptr += ETHERTYPE_VLAN_LEN;
+ }
+
+ if (!IS_ETHERTYPE(typeorlength)) {
+ /* 802.3 header*/
+ struct llc_snap_hdr_t *llc_hdr =
+ (struct llc_snap_hdr_t *)l3_data_ptr;
+ typeorlength = (llc_hdr->ethertype[0] << 8) |
+ llc_hdr->ethertype[1];
+ l3_data_ptr += sizeof(struct llc_snap_hdr_t);
+ }
+
+ tx_msdu_info->htt.info.l3_hdr_offset = (A_UINT8)(l3_data_ptr -
+ datap);
+ tx_msdu_info->htt.info.ethertype = typeorlength;
+ }
+}
+
+static inline A_UINT8
+ol_tx_tid_by_ether_type(
+ A_UINT8 *datap,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ A_UINT8 tid;
+ A_UINT8 *l3_data_ptr;
+ A_UINT16 typeorlength;
+
+ l3_data_ptr = datap + tx_msdu_info->htt.info.l3_hdr_offset;
+ typeorlength = tx_msdu_info->htt.info.ethertype;
+
+ /* IP packet, do packet inspection for TID */
+ if (typeorlength == ETHERTYPE_IPV4) {
+ tid = ol_tx_tid_by_ipv4(l3_data_ptr);
+ } else if (typeorlength == ETHERTYPE_IPV6) {
+ tid = ol_tx_tid_by_ipv6(l3_data_ptr);
+ } else if (ETHERTYPE_IS_EAPOL_WAPI(typeorlength)) {
+ /* EAPOL go with voice priority*/
+ tid = TX_EAPOL_TID;
+ } else if (typeorlength == ETHERTYPE_ARP) {
+ tid = TX_ARP_TID;
+ } else {
+ /* For non-IP case, use default TID */
+ tid = TX_DEFAULT_TID;
+ }
+ return tid;
+}
+
+static inline A_UINT8
+ol_tx_tid_by_raw_type(
+ A_UINT8 *datap,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ A_UINT8 tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ /* adjust hdr_ptr to RA */
+ struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
+
+ /* FIXME: This code does not handle the 4-address format; the QoS
+ * field is not at its usual location there.
+ */
+ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
+ IEEE80211_FC0_TYPE_DATA) {
+ /* dot11 encapsulated frame */
+ struct ieee80211_qosframe *whqos =
+ (struct ieee80211_qosframe *)datap;
+ if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
+ tid = whqos->i_qos[0] & IEEE80211_QOS_TID;
+ else
+ tid = HTT_NON_QOS_TID;
+ } else {
+ /*
+ * This function should only be applied to data frames.
+ * For management frames, we already know to use
+ * HTT_TX_EXT_TID_MGMT.
+ */
+ qdf_assert(0);
+ }
+ return tid;
+}
+
+static A_UINT8
+ol_tx_tid(
+ struct ol_txrx_pdev_t *pdev,
+ qdf_nbuf_t tx_nbuf,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ A_UINT8 *datap = qdf_nbuf_data(tx_nbuf);
+ A_UINT8 tid;
+
+ if (pdev->frame_format == wlan_frm_fmt_raw) {
+ tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_raw;
+
+ ol_tx_set_ether_type(datap, tx_msdu_info);
+ tid = tx_msdu_info->htt.info.ext_tid ==
+ QDF_NBUF_TX_EXT_TID_INVALID ?
+ ol_tx_tid_by_raw_type(datap, tx_msdu_info) :
+ tx_msdu_info->htt.info.ext_tid;
+ } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
+ tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_ethernet;
+
+ ol_tx_set_ether_type(datap, tx_msdu_info);
+ tid =
+ tx_msdu_info->htt.info.ext_tid ==
+ QDF_NBUF_TX_EXT_TID_INVALID ?
+ ol_tx_tid_by_ether_type(datap, tx_msdu_info) :
+ tx_msdu_info->htt.info.ext_tid;
+ } else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
+ struct llc_snap_hdr_t *llc;
+
+ tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
+ tx_msdu_info->htt.info.l3_hdr_offset =
+ sizeof(struct ieee80211_frame);
+ llc = (struct llc_snap_hdr_t *)
+ (datap + tx_msdu_info->htt.info.l3_hdr_offset);
+ tx_msdu_info->htt.info.ethertype =
+ (llc->ethertype[0] << 8) | llc->ethertype[1];
+ /*
+ * Native WiFi is a special case of "raw" 802.11 header format.
+ * However, we expect that for all cases that use native WiFi,
+ * the TID will be directly specified out of band.
+ */
+ tid = tx_msdu_info->htt.info.ext_tid;
+ } else {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
+ "Invalid standard frame type: %d\n",
+ pdev->frame_format);
+ qdf_assert(0);
+ tid = HTT_TX_EXT_TID_INVALID;
+ }
+ return tid;
+}
+
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+static inline
+struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ uint8_t *peer_id)
+{
+ struct ol_txrx_peer_t *peer = NULL;
+
+ if (vdev->hlTdlsFlag) {
+ peer = ol_txrx_find_peer_by_addr(pdev,
+ vdev->hl_tdls_ap_mac_addr.raw,
+ peer_id);
+ if (peer && (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
+ peer = NULL;
+ } else {
+ if (peer)
+ qdf_atomic_inc(&peer->ref_cnt);
+ }
+ }
+ if (!peer)
+ peer = ol_txrx_assoc_peer_find(vdev);
+
+ return peer;
+}
+
+#else
+static inline
+struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ uint8_t *peer_id)
+{
+ struct ol_txrx_peer_t *peer = ol_txrx_assoc_peer_find(vdev);
+
+ return peer;
+}
+
+#endif
+
+struct ol_tx_frms_queue_t *
+ol_tx_classify(
+ struct ol_txrx_vdev_t *vdev,
+ struct ol_tx_desc_t *tx_desc,
+ qdf_nbuf_t tx_nbuf,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ struct ol_txrx_peer_t *peer = NULL;
+ struct ol_tx_frms_queue_t *txq = NULL;
+ A_UINT8 *dest_addr;
+ A_UINT8 tid;
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+ u_int8_t peer_id;
+#endif
+
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+ dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
+ if ((IEEE80211_IS_MULTICAST(dest_addr)) ||
+ (vdev->opmode == wlan_op_mode_ocb)) {
+ txq = &vdev->txqs[OL_TX_VDEV_MCAST_BCAST];
+ tx_msdu_info->htt.info.ext_tid =
+ HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+ if (vdev->opmode == wlan_op_mode_sta) {
+ /*
+ * The STA sends a frame with a broadcast
+ * dest addr (DA) as a
+ * unicast frame to the AP's receive addr (RA).
+ * Find the peer object that represents the AP
+ * that the STA is associated with.
+ */
+ peer = ol_txrx_assoc_peer_find(vdev);
+ if (!peer) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "Error: STA %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast DA tx data frame w/o association\n",
+ vdev,
+ vdev->mac_addr.raw[0],
+ vdev->mac_addr.raw[1],
+ vdev->mac_addr.raw[2],
+ vdev->mac_addr.raw[3],
+ vdev->mac_addr.raw[4],
+ vdev->mac_addr.raw[5]);
+ return NULL; /* error */
+ } else if ((peer->security[
+ OL_TXRX_PEER_SECURITY_MULTICAST].sec_type
+ != htt_sec_type_wapi) &&
+ (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
+ if (true == qdf_nbuf_is_ipv4_dhcp_pkt(
+ tx_nbuf)) {
+ /* DHCP frame to go with
+ * voice priority
+ */
+ txq = &peer->txqs[TX_DHCP_TID];
+ tx_msdu_info->htt.info.ext_tid =
+ TX_DHCP_TID;
+ }
+ }
+ /*
+ * The following line assumes each peer object has a
+ * single ID. This is currently true, and is expected
+ * to remain true.
+ */
+ tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
+ } else if (vdev->opmode == wlan_op_mode_ocb) {
+ tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
+ /* In OCB mode, don't worry about the peer;
+ * we don't need it. */
+ peer = NULL;
+ } else {
+ tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
+ /*
+ * Look up the vdev's BSS peer, so that the
+ * classify_extension function can check whether to
+ * encrypt multicast / broadcast frames.
+ */
+ peer = ol_txrx_peer_find_hash_find(pdev,
+ vdev->mac_addr.raw,
+ 0, 1);
+ if (!peer) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "Error: vdev %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast/mcast, but no self-peer found\n",
+ vdev,
+ vdev->mac_addr.raw[0],
+ vdev->mac_addr.raw[1],
+ vdev->mac_addr.raw[2],
+ vdev->mac_addr.raw[3],
+ vdev->mac_addr.raw[4],
+ vdev->mac_addr.raw[5]);
+ return NULL; /* error */
+ }
+ }
+ tx_msdu_info->htt.info.is_unicast = false;
+ } else {
+ /* tid would be overwritten for the non-QoS case */
+ tid = ol_tx_tid(pdev, tx_nbuf, tx_msdu_info);
+ if ((HTT_TX_EXT_TID_INVALID == tid) ||
+ (tid >= OL_TX_NUM_TIDS)) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "%s Error: could not classify packet into valid TID(%d).\n",
+ __func__, tid);
+ return NULL;
+ }
+#ifdef ATH_SUPPORT_WAPI
+ /* Check to see if a frame is a WAI frame */
+ if (tx_msdu_info->htt.info.ethertype == ETHERTYPE_WAI) {
+ /* WAI frames should not be encrypted */
+ tx_msdu_info->htt.action.do_encrypt = 0;
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+ "Tx Frame is a WAI frame\n");
+ }
+#endif /* ATH_SUPPORT_WAPI */
+
+ /*
+ * Find the peer and increment its reference count.
+ * If this vdev is an AP, use the dest addr (DA) to determine
+ * which peer STA this unicast data frame is for.
+ * If this vdev is a STA, the unicast data frame is for the
+ * AP the STA is associated with.
+ */
+ if (vdev->opmode == wlan_op_mode_sta) {
+ /*
+ * TO DO:
+ * To support TDLS, first check if there is a TDLS
+ * peer STA,
+ * and if so, check if the DA matches the TDLS peer
+ * STA's MAC address. If there is no peer TDLS STA,
+ * or if the DA is not the TDLS STA's address,
+ * then the frame is either for the AP itself, or is
+ * supposed to be sent to the AP for forwarding.
+ */
+#if 0
+ if (vdev->num_tdls_peers > 0) {
+ peer = NULL;
+ for (i = 0; i < vdev->num_tdls_peers; i++) {
+ int differs = adf_os_mem_cmp(
+ vdev->tdls_peers[i]->
+ mac_addr.raw,
+ dest_addr,
+ OL_TXRX_MAC_ADDR_LEN);
+ if (!differs) {
+ peer = vdev->tdls_peers[i];
+ break;
+ }
+ }
+ } else {
+ /* send to AP */
+ peer = ol_txrx_assoc_peer_find(vdev);
+ }
+#endif
+
+ peer = ol_tx_tdls_peer_find(pdev, vdev, &peer_id);
+ } else {
+ peer = ol_txrx_peer_find_hash_find(pdev, dest_addr,
+ 0, 1);
+ }
+ tx_msdu_info->htt.info.is_unicast = true;
+ if (!peer) {
+ /*
+ * Unicast data xfer can only happen to an
+ * associated peer. It is illegitimate to send unicast
+ * data if there is no peer to send it to.
+ */
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "Error: vdev %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send unicast tx data frame to an unknown peer\n",
+ vdev,
+ vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
+ vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
+ vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
+ return NULL; /* error */
+ }
+ TX_SCHED_DEBUG_PRINT("Peer found\n");
+ if (!peer->qos_capable) {
+ tid = OL_TX_NON_QOS_TID;
+ } else if ((peer->security[
+ OL_TXRX_PEER_SECURITY_UNICAST].sec_type
+ != htt_sec_type_wapi) &&
+ (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
+ if (true == qdf_nbuf_is_ipv4_dhcp_pkt(tx_nbuf))
+ /* DHCP frame to go with voice priority */
+ tid = TX_DHCP_TID;
+ }
+
+ /* Only allow encryption when in authenticated state */
+ if (OL_TXRX_PEER_STATE_AUTH != peer->state)
+ tx_msdu_info->htt.action.do_encrypt = 0;
+
+ txq = &peer->txqs[tid];
+ tx_msdu_info->htt.info.ext_tid = tid;
+ /*
+ * The following line assumes each peer object has a single ID.
+ * This is currently true, and is expected to remain true.
+ */
+ tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
+ /*
+ * WORKAROUND - check that the peer ID is valid.
+ * If tx data is provided before ol_rx_peer_map_handler is
+ * called to record the peer ID specified by the target,
+ * then we could end up here with an invalid peer ID.
+ * TO DO: rather than dropping the tx frame, pause the txq it
+ * goes into, then fill in the peer ID for the entries in the
+ * txq when the peer_map event provides the peer ID, and then
+ * unpause the txq.
+ */
+ if (tx_msdu_info->htt.info.peer_id == HTT_INVALID_PEER_ID) {
+ if (peer) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "%s: remove the peer for invalid peer_id %p\n",
+ __func__, peer);
+ /* remove the peer reference added above */
+ ol_txrx_peer_unref_delete(peer);
+ tx_msdu_info->peer = NULL;
+ }
+ return NULL;
+ }
+ }
+ tx_msdu_info->peer = peer;
+ if (ol_if_tx_bad_peer_txq_overflow(pdev, peer, txq))
+ return NULL;
+ /*
+ * If relevant, do a deeper inspection to determine additional
+ * characteristics of the tx frame.
+ * If the frame is invalid, then the txq will be set to NULL to
+ * indicate an error.
+ */
+ OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, tx_nbuf, tx_msdu_info, txq);
+ if (IEEE80211_IS_MULTICAST(dest_addr) && vdev->opmode !=
+ wlan_op_mode_sta && tx_msdu_info->peer !=
+ NULL) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+ "%s: remove the peer reference %p\n",
+ __func__, peer);
+ /* remove the peer reference added above */
+ ol_txrx_peer_unref_delete(tx_msdu_info->peer);
+ /* Set peer to NULL for the multicast non-STA case */
+ tx_msdu_info->peer = NULL;
+ }
+
+ /* Whether this frame can go through the HTT2 data pipe or not. */
+ OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);
+
+ /* Update Tx Queue info */
+ tx_desc->txq = txq;
+
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+ return txq;
+}
+
+struct ol_tx_frms_queue_t *
+ol_tx_classify_mgmt(
+ struct ol_txrx_vdev_t *vdev,
+ struct ol_tx_desc_t *tx_desc,
+ qdf_nbuf_t tx_nbuf,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ struct ol_txrx_peer_t *peer = NULL;
+ struct ol_tx_frms_queue_t *txq = NULL;
+ A_UINT8 *dest_addr;
+ union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
+
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+ dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
+ if (IEEE80211_IS_MULTICAST(dest_addr)) {
+ /*
+ * AP: beacons are broadcast,
+ * public action frames (e.g. extended channel
+ * switch announce) may be broadcast
+ * STA: probe requests can be either broadcast or unicast
+ */
+ txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
+ tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
+ tx_msdu_info->peer = NULL;
+ tx_msdu_info->htt.info.is_unicast = 0;
+ } else {
+ /*
+ * Find the peer and increment its reference count.
+ * If this vdev is an AP, use the receiver addr (RA) to
+ * determine which peer STA this unicast mgmt frame is for.
+ * If this vdev is a STA, the unicast mgmt frame is for the
+ * AP the STA is associated with.
+ * Probe request / response and Assoc request / response are
+ * sent before the peer exists - in this case, use the
+ * vdev's default tx queue.
+ */
+ if (vdev->opmode == wlan_op_mode_sta) {
+ /*
+ * TO DO:
+ * To support TDLS, first check if there is a TDLS
+ * peer STA, and if so, check if the DA matches
+ * the TDLS peer STA's MAC address.
+ */
+ peer = ol_txrx_assoc_peer_find(vdev);
+ /*
+			 * Some special cases (preauth, for example) need to
+			 * send a unicast mgmt frame to an unassociated AP.
+			 * In such a case, check whether the dest addr matches
+			 * the associated peer's addr. If not, set peer to
+			 * NULL so the frame is queued to the vdev queue.
+ */
+ if (peer) {
+ qdf_mem_copy(
+ &local_mac_addr_aligned.raw[0],
+ dest_addr, OL_TXRX_MAC_ADDR_LEN);
+ mac_addr = &local_mac_addr_aligned;
+ if (ol_txrx_peer_find_mac_addr_cmp(
+ mac_addr,
+ &peer->mac_addr) != 0) {
+ qdf_atomic_dec(&peer->ref_cnt);
+ peer = NULL;
+ }
+ }
+ } else {
+ /* find the peer and increment its reference count */
+ peer = ol_txrx_peer_find_hash_find(pdev, dest_addr,
+ 0, 1);
+ }
+ tx_msdu_info->peer = peer;
+ if (!peer) {
+ txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
+ tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
+ } else {
+ txq = &peer->txqs[HTT_TX_EXT_TID_MGMT];
+ tx_msdu_info->htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
+ /*
+ * The following line assumes each peer object has a
+ * single ID. This is currently true, and is expected
+ * to remain true.
+ */
+ tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
+ }
+ tx_msdu_info->htt.info.is_unicast = 1;
+ }
+ /*
+ * If relevant, do a deeper inspection to determine additional
+ * characteristics of the tx frame.
+ * If the frame is invalid, then the txq will be set to NULL to
+ * indicate an error.
+ */
+ OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, tx_nbuf,
+ tx_msdu_info, txq);
+
+	/* Check whether this frame can be downloaded through the HTT2 data pipe. */
+ OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);
+
+ /* Update Tx Queue info */
+ tx_desc->txq = txq;
+
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+ return txq;
+}
+
+A_STATUS
+ol_tx_classify_extension(
+ struct ol_txrx_vdev_t *vdev,
+ struct ol_tx_desc_t *tx_desc,
+ qdf_nbuf_t tx_msdu,
+ struct ol_txrx_msdu_info_t *msdu_info)
+{
+ A_UINT8 *datap = qdf_nbuf_data(tx_msdu);
+ struct ol_txrx_peer_t *peer;
+ int which_key;
+
+ /*
+ * The following msdu_info fields were already filled in by the
+ * ol_tx entry function or the regular ol_tx_classify function:
+ * htt.info.vdev_id (ol_tx_hl or ol_tx_non_std_hl)
+ * htt.info.ext_tid (ol_tx_non_std_hl or ol_tx_classify)
+ * htt.info.frame_type (ol_tx_hl or ol_tx_non_std_hl)
+ * htt.info.l2_hdr_type (ol_tx_hl or ol_tx_non_std_hl)
+ * htt.info.is_unicast (ol_tx_classify)
+ * htt.info.peer_id (ol_tx_classify)
+ * peer (ol_tx_classify)
+ * if (is_unicast) {
+ * htt.info.ethertype (ol_tx_classify)
+ * htt.info.l3_hdr_offset (ol_tx_classify)
+ * }
+ * The following fields need to be filled in by this function:
+ * if (!is_unicast) {
+ * htt.info.ethertype
+ * htt.info.l3_hdr_offset
+ * }
+ * htt.action.band (NOT CURRENTLY USED)
+ * htt.action.do_encrypt
+ * htt.action.do_tx_complete
+ * The following fields are not needed for data frames, and can
+ * be left uninitialized:
+ * htt.info.frame_subtype
+ */
+
+ if (!msdu_info->htt.info.is_unicast) {
+ int l2_hdr_size;
+ A_UINT16 ethertype;
+
+ if (msdu_info->htt.info.l2_hdr_type == htt_pkt_type_ethernet) {
+ struct ethernet_hdr_t *eh;
+
+ eh = (struct ethernet_hdr_t *)datap;
+ l2_hdr_size = sizeof(*eh);
+ ethertype = (eh->ethertype[0] << 8) | eh->ethertype[1];
+
+ if (ethertype == ETHERTYPE_VLAN) {
+ struct ethernet_vlan_hdr_t *evh;
+
+ evh = (struct ethernet_vlan_hdr_t *)datap;
+ l2_hdr_size = sizeof(*evh);
+ ethertype = (evh->ethertype[0] << 8) |
+ evh->ethertype[1];
+ }
+
+ if (!IS_ETHERTYPE(ethertype)) {
+				/* 802.3 header */
+ struct llc_snap_hdr_t *llc =
+ (struct llc_snap_hdr_t *)(datap +
+ l2_hdr_size);
+ ethertype = (llc->ethertype[0] << 8) |
+ llc->ethertype[1];
+ l2_hdr_size += sizeof(*llc);
+ }
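+			/*
+			 * Example: with a 14-byte 802.3 header followed by
+			 * an 8-byte LLC/SNAP header, l3_hdr_offset ends up
+			 * as 22 bytes.
+			 */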
+ msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
+ msdu_info->htt.info.ethertype = ethertype;
+ } else { /* 802.11 */
+ struct llc_snap_hdr_t *llc;
+ l2_hdr_size = ol_txrx_ieee80211_hdrsize(datap);
+ llc = (struct llc_snap_hdr_t *)(datap + l2_hdr_size);
+ ethertype = (llc->ethertype[0] << 8) |
+ llc->ethertype[1];
+ /*
+ * Don't include the LLC/SNAP header in l2_hdr_size,
+ * because l3_hdr_offset is actually supposed to refer
+ * to the header after the 802.3 or 802.11 header,
+ * which could be a LLC/SNAP header rather
+ * than the L3 header.
+ */
+ }
+ msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
+ msdu_info->htt.info.ethertype = ethertype;
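+		/*
+		 * Multicast / broadcast frames are protected with the group
+		 * key, so the mcast security type decides whether to encrypt.
+		 */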
+ which_key = txrx_sec_mcast;
+ } else {
+ which_key = txrx_sec_ucast;
+ }
+ peer = msdu_info->peer;
+ /*
+ * msdu_info->htt.action.do_encrypt is initially set in ol_tx_desc_hl.
+	 * Additional checks are applied here.
+ */
+ msdu_info->htt.action.do_encrypt = (!peer) ? 0 :
+ (peer->security[which_key].sec_type == htt_sec_type_none) ? 0 :
+ msdu_info->htt.action.do_encrypt;
+ /*
+ * For systems that have a frame by frame spec for whether to receive
+ * a tx completion notification, use the tx completion notification
+ * only for certain management frames, not for data frames.
+ * (In the future, this may be changed slightly, e.g. to request a
+ * tx completion notification for the final EAPOL message sent by a
+ * STA during the key delivery handshake.)
+ */
+ msdu_info->htt.action.do_tx_complete = 0;
+
+ return A_OK;
+}
+
+A_STATUS
+ol_tx_classify_mgmt_extension(
+ struct ol_txrx_vdev_t *vdev,
+ struct ol_tx_desc_t *tx_desc,
+ qdf_nbuf_t tx_msdu,
+ struct ol_txrx_msdu_info_t *msdu_info)
+{
+ struct ieee80211_frame *wh;
+
+ /*
+ * The following msdu_info fields were already filled in by the
+ * ol_tx entry function or the regular ol_tx_classify_mgmt function:
+ * htt.info.vdev_id (ol_txrx_mgmt_send)
+ * htt.info.frame_type (ol_txrx_mgmt_send)
+ * htt.info.l2_hdr_type (ol_txrx_mgmt_send)
+ * htt.action.do_tx_complete (ol_txrx_mgmt_send)
+ * htt.info.peer_id (ol_tx_classify_mgmt)
+ * htt.info.ext_tid (ol_tx_classify_mgmt)
+ * htt.info.is_unicast (ol_tx_classify_mgmt)
+ * peer (ol_tx_classify_mgmt)
+ * The following fields need to be filled in by this function:
+ * htt.info.frame_subtype
+ * htt.info.l3_hdr_offset
+ * htt.action.band (NOT CURRENTLY USED)
+ * The following fields are not needed for mgmt frames, and can
+ * be left uninitialized:
+ * htt.info.ethertype
+ * htt.action.do_encrypt
+ * (This will be filled in by other SW, which knows whether
+	 *	the peer has robust-management-frames enabled.)
+ */
+ wh = (struct ieee80211_frame *)qdf_nbuf_data(tx_msdu);
+ msdu_info->htt.info.frame_subtype =
+ (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
+ IEEE80211_FC0_SUBTYPE_SHIFT;
+ msdu_info->htt.info.l3_hdr_offset = sizeof(struct ieee80211_frame);
+
+ return A_OK;
+}
+
+#endif /* defined(CONFIG_HL_SUPPORT) */
diff --git a/core/dp/txrx/ol_tx_classify.h b/core/dp/txrx/ol_tx_classify.h
new file mode 100644
index 0000000..159897a
--- /dev/null
+++ b/core/dp/txrx/ol_tx_classify.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2012, 2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_tx_classify.h
+ * @brief API definitions for the tx classify module within the data SW.
+ */
+#ifndef _OL_TX_CLASSIFY__H_
+#define _OL_TX_CLASSIFY__H_
+
+#include <qdf_nbuf.h> /* qdf_nbuf_t */
+#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */
+
+static inline u_int8_t *
+ol_tx_dest_addr_find(
+ struct ol_txrx_pdev_t *pdev,
+ qdf_nbuf_t tx_nbuf)
+{
+ u_int8_t *hdr_ptr;
+ void *datap = qdf_nbuf_data(tx_nbuf);
+
+	if (pdev->frame_format == wlan_frm_fmt_raw ||
+	    pdev->frame_format == wlan_frm_fmt_native_wifi) {
+		/* adjust hdr_ptr to RA */
+		struct ieee80211_frame *wh =
+			(struct ieee80211_frame *)datap;
+		hdr_ptr = wh->i_addr1;
+ } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
+ hdr_ptr = datap;
+ } else {
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "Invalid standard frame type: %d\n",
+ pdev->frame_format);
+ qdf_assert(0);
+ hdr_ptr = NULL;
+ }
+ return hdr_ptr;
+}
+
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * @brief Classify a tx frame into the appropriate tid tx queue.
+ *
+ * @param vdev - the virtual device sending the data
+ * (for specifying the transmitter address for multicast / broadcast data)
+ * @param tx_desc - descriptor object with meta-data about the tx frame
+ * @param netbuf - the tx frame
+ * @param tx_msdu_info - characteristics of the tx frame
+ */
+struct ol_tx_frms_queue_t *
+ol_tx_classify(
+ struct ol_txrx_vdev_t *vdev,
+ struct ol_tx_desc_t *tx_desc,
+ qdf_nbuf_t netbuf,
+ struct ol_txrx_msdu_info_t *tx_msdu_info);
+
+struct ol_tx_frms_queue_t *
+ol_tx_classify_mgmt(
+ struct ol_txrx_vdev_t *vdev,
+ struct ol_tx_desc_t *tx_desc,
+ qdf_nbuf_t netbuf,
+ struct ol_txrx_msdu_info_t *tx_msdu_info);
+
+#else
+
+#define ol_tx_classify(vdev, tx_desc, netbuf, tx_msdu_info) NULL
+#define ol_tx_classify_mgmt(vdev, tx_desc, netbuf, tx_msdu_info) NULL
+
+#endif /* defined(CONFIG_HL_SUPPORT) */
+
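+/*
+ * Typical usage (illustrative sketch only - the exact call sequence is
+ * defined by the HL tx entry path, e.g. ol_tx_hl):
+ *
+ *	txq = ol_tx_classify(vdev, tx_desc, netbuf, &tx_msdu_info);
+ *	if (!txq)
+ *		handle the error, e.g. free the tx descriptor;
+ *	else
+ *		ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
+ */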
+
+#endif /* _OL_TX_CLASSIFY__H_ */
diff --git a/core/dp/txrx/ol_tx_desc.c b/core/dp/txrx/ol_tx_desc.c
index 5635abc..04d18c4 100644
--- a/core/dp/txrx/ol_tx_desc.c
+++ b/core/dp/txrx/ol_tx_desc.c
@@ -105,7 +105,56 @@
}
#endif
+#ifdef CONFIG_HL_SUPPORT
+
+/**
+ * ol_tx_desc_vdev_update() - assign the vdev to the tx descriptor.
+ * @tx_desc: tx descriptor pointer
+ * @vdev: vdev handle
+ *
+ * Return: None
+ */
+static inline void
+ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
+ struct ol_txrx_vdev_t *vdev)
+{
+ tx_desc->vdev = vdev;
+}
+#else
+
+static inline void
+ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
+ struct ol_txrx_vdev_t *vdev)
+{
+ return;
+}
+#endif
+
+#ifdef CONFIG_PER_VDEV_TX_DESC_POOL
+
+/**
+ * ol_tx_desc_count_inc() - tx desc count increment for desc allocation.
+ * @vdev: vdev handle
+ *
+ * Return: None
+ */
+static inline void
+ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
+{
+ qdf_atomic_inc(&vdev->tx_desc_count);
+}
+#else
+
+static inline void
+ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
+{
+ return;
+}
+
+#endif
+
#ifndef QCA_LL_TX_FLOW_CONTROL_V2
+
/**
* ol_tx_desc_alloc() - allocate descriptor from freelist
* @pdev: pdev handle
@@ -127,6 +176,13 @@
ol_tx_desc_compute_delay(tx_desc);
}
qdf_spin_unlock_bh(&pdev->tx_mutex);
+
+ if (!tx_desc)
+ return NULL;
+
+ ol_tx_desc_vdev_update(tx_desc, vdev);
+ ol_tx_desc_count_inc(vdev);
+
return tx_desc;
}
@@ -220,6 +276,53 @@
#endif
#endif
+/**
+ * ol_tx_desc_alloc_hl() - allocate tx descriptor
+ * @pdev: pdev handle
+ * @vdev: vdev handle
+ * @msdu_info: tx msdu info
+ *
+ * Return: tx descriptor pointer, or NULL in case of error
+ */
+static struct ol_tx_desc_t *
+ol_tx_desc_alloc_hl(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_msdu_info_t *msdu_info)
+{
+ struct ol_tx_desc_t *tx_desc;
+
+ tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
+ if (!tx_desc)
+ return NULL;
+
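+	/*
+	 * Each HL tx descriptor consumes one unit of the tx_queue resource
+	 * count; this count is later used to decide when queued frames must
+	 * be discarded to make room for new, possibly higher-priority frames.
+	 */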
+ qdf_atomic_dec(&pdev->tx_queue.rsrc_cnt);
+
+ return tx_desc;
+}
+
+#if defined(CONFIG_PER_VDEV_TX_DESC_POOL) && defined(CONFIG_HL_SUPPORT)
+
+/**
+ * ol_tx_desc_vdev_rm() - decrement the tx desc count for vdev.
+ * @tx_desc: tx desc
+ *
+ * Return: None
+ */
+static inline void
+ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
+{
+ qdf_atomic_dec(&tx_desc->vdev->tx_desc_count);
+ tx_desc->vdev = NULL;
+}
+#else
+
+static inline void
+ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
+{
+ return;
+}
+#endif
+
#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
* ol_tx_desc_free() - put descriptor to freelist
@@ -246,6 +349,8 @@
ol_tx_desc_reset_timestamp(tx_desc);
ol_tx_put_desc_global_pool(pdev, tx_desc);
+ ol_tx_desc_vdev_rm(tx_desc);
+
qdf_spin_unlock_bh(&pdev->tx_mutex);
}
@@ -313,7 +418,7 @@
dump_pkt(qdf_nbuf_t nbuf, qdf_dma_addr_t nbuf_paddr, int len)
{
qdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
- qdf_nbuf_data(nbuf), nbuf_paddr, len);
+ qdf_nbuf_data(nbuf), (long long unsigned int)nbuf_paddr, len);
print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
qdf_nbuf_data(nbuf), len, true);
}
@@ -491,6 +596,52 @@
return tx_desc;
}
+struct ol_tx_desc_t *
+ol_tx_desc_hl(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ qdf_nbuf_t netbuf,
+ struct ol_txrx_msdu_info_t *msdu_info)
+{
+ struct ol_tx_desc_t *tx_desc;
+
+ /* FIX THIS: these inits should probably be done by tx classify */
+ msdu_info->htt.info.vdev_id = vdev->vdev_id;
+ msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
+ msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
+ switch (qdf_nbuf_get_exemption_type(netbuf)) {
+ case QDF_NBUF_EXEMPT_NO_EXEMPTION:
+ case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
+ /* We want to encrypt this frame */
+ msdu_info->htt.action.do_encrypt = 1;
+ break;
+ case QDF_NBUF_EXEMPT_ALWAYS:
+ /* We don't want to encrypt this frame */
+ msdu_info->htt.action.do_encrypt = 0;
+ break;
+ default:
+ qdf_assert(0);
+ break;
+ }
+
+ /* allocate the descriptor */
+ tx_desc = ol_tx_desc_alloc_hl(pdev, vdev, msdu_info);
+ if (!tx_desc)
+ return NULL;
+
+ /* initialize the SW tx descriptor */
+ tx_desc->netbuf = netbuf;
+ /* fix this - get pkt_type from msdu_info */
+ tx_desc->pkt_type = OL_TX_FRM_STD;
+
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+ tx_desc->orig_l2_hdr_bytes = 0;
+#endif
+ /* the HW tx descriptor will be initialized later by the caller */
+
+ return tx_desc;
+}
+
void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
ol_tx_desc_list *tx_descs, int had_error)
{
diff --git a/core/dp/txrx/ol_tx_desc.h b/core/dp/txrx/ol_tx_desc.h
index 2509fec..4842a8a 100644
--- a/core/dp/txrx/ol_tx_desc.h
+++ b/core/dp/txrx/ol_tx_desc.h
@@ -72,6 +72,31 @@
qdf_nbuf_t netbuf,
struct ol_txrx_msdu_info_t *msdu_info);
+
+/**
+ * @brief Allocate and initialize a tx descriptor for a HL system.
+ * @details
+ * Allocate a tx descriptor pair for a new tx frame - a SW tx descriptor
+ * for private use within the host data SW, and a HTT tx descriptor for
+ * downloading tx meta-data to the target FW/HW.
+ * Fill in the fields of this pair of tx descriptors based on the
+ * information in the netbuf.
+ *
+ * @param pdev - the data physical device sending the data
+ * (for accessing the tx desc pool)
+ * @param vdev - the virtual device sending the data
+ * (for specifying the transmitter address for multicast / broadcast data)
+ * @param netbuf - the tx frame
+ * @param msdu_info - tx meta-data
+ */
+struct ol_tx_desc_t *
+ol_tx_desc_hl(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ qdf_nbuf_t netbuf,
+ struct ol_txrx_msdu_info_t *msdu_info);
+
+
/**
* @brief Use a tx descriptor ID to find the corresponding desriptor object.
*
diff --git a/core/dp/txrx/ol_tx_queue.c b/core/dp/txrx/ol_tx_queue.c
index 5351a8e..c57db12 100644
--- a/core/dp/txrx/ol_tx_queue.c
+++ b/core/dp/txrx/ol_tx_queue.c
@@ -37,10 +37,1609 @@
#include <ol_txrx_internal.h> /* TXRX_ASSERT1, etc. */
#include <ol_tx_desc.h> /* ol_tx_desc, ol_tx_desc_frame_list_free */
#include <ol_tx.h> /* ol_tx_vdev_ll_pause_queue_send */
+#include <ol_tx_sched.h> /* ol_tx_sched_notify, etc. */
#include <ol_tx_queue.h>
+#include <ol_txrx.h> /* ol_tx_desc_pool_size_hl */
#include <ol_txrx_dbg.h> /* ENABLE_TX_QUEUE_LOG */
#include <qdf_types.h> /* bool */
#include "cdp_txrx_flow_ctrl_legacy.h"
+#include <ol_txrx_peer_find.h>
+
+#if defined(CONFIG_HL_SUPPORT)
+
+#ifndef offsetof
+#define offsetof(type, field) ((qdf_size_t)(&((type *)0)->field))
+#endif
+
+/*--- function prototypes for optional host ADDBA negotiation ---------------*/
+
+#define OL_TX_QUEUE_ADDBA_CHECK(pdev, txq, tx_msdu_info) /* no-op */
+
+#ifndef container_of
+#define container_of(ptr, type, member) ((type *)( \
+ (char *)(ptr) - (char *)(&((type *)0)->member)))
+#endif
+/*--- function definitions --------------------------------------------------*/
+
+/**
+ * ol_tx_queue_vdev_flush() - flush the vdev's pending tx frames, whether
+ *			      or not they are queued in the TX scheduler
+ * @pdev: the physical device object
+ * @vdev: the virtual device object
+ *
+ * Return: None
+ */
+static void
+ol_tx_queue_vdev_flush(struct ol_txrx_pdev_t *pdev, struct ol_txrx_vdev_t *vdev)
+{
+#define PEER_ARRAY_COUNT 10
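+/*
+ * Peers are flushed in batches of PEER_ARRAY_COUNT so that peer_ref_mutex
+ * is released between batches rather than being held across the whole flush.
+ */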
+ struct ol_tx_frms_queue_t *txq;
+ struct ol_txrx_peer_t *peer, *peers[PEER_ARRAY_COUNT];
+ int i, j, peer_count;
+
+ /* flush VDEV TX queues */
+ for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
+ txq = &vdev->txqs[i];
+ ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS));
+ }
+ /* flush PEER TX queues */
+ do {
+ peer_count = 0;
+ /* select candidate peers */
+ qdf_spin_lock_bh(&pdev->peer_ref_mutex);
+ TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
+ for (i = 0; i < OL_TX_NUM_TIDS; i++) {
+ txq = &peer->txqs[i];
+ if (txq->frms) {
+ qdf_atomic_inc(&peer->ref_cnt);
+ peers[peer_count++] = peer;
+ break;
+ }
+ }
+ if (peer_count >= PEER_ARRAY_COUNT)
+ break;
+ }
+ qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+ /* flush TX queues of candidate peers */
+ for (i = 0; i < peer_count; i++) {
+ for (j = 0; j < OL_TX_NUM_TIDS; j++) {
+ txq = &peers[i]->txqs[j];
+ if (txq->frms)
+ ol_tx_queue_free(pdev, txq, j);
+ }
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "%s: Delete Peer %p\n", __func__, peer);
+ ol_txrx_peer_unref_delete(peers[i]);
+ }
+ } while (peer_count >= PEER_ARRAY_COUNT);
+}
+
+/**
+ * ol_tx_queue_flush() - flush all pending tx frames, whether or not
+ *			 they are queued in the TX scheduler
+ * @pdev: the physical device object
+ *
+ * Return: None
+ */
+static inline void
+ol_tx_queue_flush(struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_txrx_vdev_t *vdev;
+
+ TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+ ol_tx_queue_vdev_flush(pdev, vdev);
+ }
+}
+
+void
+ol_tx_queue_discard(
+ struct ol_txrx_pdev_t *pdev,
+ bool flush_all,
+ ol_tx_desc_list *tx_descs)
+{
+ u_int16_t num;
+ u_int16_t discarded, actual_discarded = 0;
+
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+
+ if (flush_all == true)
+ /* flush all the pending tx queues in the scheduler */
+ num = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev) -
+ qdf_atomic_read(&pdev->tx_queue.rsrc_cnt);
+ else
+ num = pdev->tx_queue.rsrc_threshold_hi -
+ pdev->tx_queue.rsrc_threshold_lo;
+
+ TX_SCHED_DEBUG_PRINT("+%s : %u\n,", __func__,
+ qdf_atomic_read(&pdev->tx_queue.rsrc_cnt));
+ while (num > 0) {
+ discarded = ol_tx_sched_discard_select(
+ pdev, (u_int16_t)num, tx_descs, flush_all);
+ if (discarded == 0)
+ /*
+ * No more packets could be discarded.
+ * Probably tx queues are empty.
+ */
+ break;
+
+ num -= discarded;
+ actual_discarded += discarded;
+ }
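+	/* return the descriptors freed by the discard to the resource count */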
+ qdf_atomic_add(actual_discarded, &pdev->tx_queue.rsrc_cnt);
+ TX_SCHED_DEBUG_PRINT("-%s\n", __func__);
+
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+
+ if (flush_all == true && num > 0)
+ /*
+ * try to flush pending frames in the tx queues
+ * which are not queued in the TX scheduler.
+ */
+ ol_tx_queue_flush(pdev);
+}
+
+#ifdef CONFIG_PER_VDEV_TX_DESC_POOL
+
+/**
+ * is_ol_tx_discard_frames_success() - check whether currently queued tx frames
+ * can be discarded or not
+ * @pdev: the physical device object
+ * @tx_desc: tx descriptor ptr
+ *
+ * Return: true if currently queued tx frames should be discarded
+ *	   (i.e. too few tx descriptors are available)
+ */
+static bool
+is_ol_tx_discard_frames_success(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_desc_t *tx_desc)
+{
+ ol_txrx_vdev_handle vdev;
+ vdev = tx_desc->vdev;
+ return qdf_atomic_read(&vdev->tx_desc_count) >
+ ((ol_tx_desc_pool_size_hl(pdev->ctrl_pdev) >> 1)
+ - TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED);
+}
+#else
+
+static inline bool
+is_ol_tx_discard_frames_success(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_desc_t *tx_desc)
+{
+ return qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) <=
+ pdev->tx_queue.rsrc_threshold_lo;
+}
+#endif
+
+void
+ol_tx_enqueue(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_tx_desc_t *tx_desc,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ int bytes;
+ struct ol_tx_sched_notify_ctx_t notify_ctx;
+
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+
+ /*
+ * If too few tx descriptors are available, drop some currently-queued
+ * tx frames, to provide enough tx descriptors for new frames, which
+ * may be higher priority than the current frames.
+ */
+ if (is_ol_tx_discard_frames_success(pdev, tx_desc)) {
+ ol_tx_desc_list tx_descs;
+ TAILQ_INIT(&tx_descs);
+ ol_tx_queue_discard(pdev, false, &tx_descs);
+ /*Discard Frames in Discard List*/
+ ol_tx_desc_frame_list_free(pdev, &tx_descs, 1 /* error */);
+ }
+
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+ TAILQ_INSERT_TAIL(&txq->head, tx_desc, tx_desc_list_elem);
+
+ bytes = qdf_nbuf_len(tx_desc->netbuf);
+ txq->frms++;
+ txq->bytes += bytes;
+ ol_tx_queue_log_enqueue(pdev, tx_msdu_info, 1, bytes);
+
+ if (txq->flag != ol_tx_queue_paused) {
+ notify_ctx.event = OL_TX_ENQUEUE_FRAME;
+ notify_ctx.frames = 1;
+ notify_ctx.bytes = qdf_nbuf_len(tx_desc->netbuf);
+ notify_ctx.txq = txq;
+ notify_ctx.info.tx_msdu_info = tx_msdu_info;
+ ol_tx_sched_notify(pdev, ¬ify_ctx);
+ txq->flag = ol_tx_queue_active;
+ }
+
+ if (!ETHERTYPE_IS_EAPOL_WAPI(tx_msdu_info->htt.info.ethertype))
+ OL_TX_QUEUE_ADDBA_CHECK(pdev, txq, tx_msdu_info);
+
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+u_int16_t
+ol_tx_dequeue(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ ol_tx_desc_list *head,
+ u_int16_t max_frames,
+ u_int32_t *credit,
+ int *bytes)
+{
+ u_int16_t num_frames;
+ int bytes_sum;
+ unsigned credit_sum;
+
+ TXRX_ASSERT2(txq->flag != ol_tx_queue_paused);
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+
+ if (txq->frms < max_frames)
+ max_frames = txq->frms;
+
+ bytes_sum = 0;
+ credit_sum = 0;
+ for (num_frames = 0; num_frames < max_frames; num_frames++) {
+ unsigned frame_credit;
+ struct ol_tx_desc_t *tx_desc;
+ tx_desc = TAILQ_FIRST(&txq->head);
+
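+		/*
+		 * Stop dequeuing once the next frame's download credit
+		 * would exceed the credit remaining for this scheduler pass.
+		 */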
+ frame_credit = htt_tx_msdu_credit(tx_desc->netbuf);
+ if (credit_sum + frame_credit > *credit)
+ break;
+
+ credit_sum += frame_credit;
+ bytes_sum += qdf_nbuf_len(tx_desc->netbuf);
+ TAILQ_REMOVE(&txq->head, tx_desc, tx_desc_list_elem);
+ TAILQ_INSERT_TAIL(head, tx_desc, tx_desc_list_elem);
+ }
+ txq->frms -= num_frames;
+ txq->bytes -= bytes_sum;
+ /* a paused queue remains paused, regardless of whether it has frames */
+ if (txq->frms == 0 && txq->flag == ol_tx_queue_active)
+ txq->flag = ol_tx_queue_empty;
+
+ ol_tx_queue_log_dequeue(pdev, txq, num_frames, bytes_sum);
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+
+ *bytes = bytes_sum;
+ *credit = credit_sum;
+ return num_frames;
+}
+
+void
+ol_tx_queue_free(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid)
+{
+ int frms = 0, bytes = 0;
+ struct ol_tx_desc_t *tx_desc;
+ struct ol_tx_sched_notify_ctx_t notify_ctx;
+ ol_tx_desc_list tx_tmp_list;
+
+ TAILQ_INIT(&tx_tmp_list);
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+
+ notify_ctx.event = OL_TX_DELETE_QUEUE;
+ notify_ctx.txq = txq;
+ notify_ctx.info.ext_tid = tid;
+ ol_tx_sched_notify(pdev, ¬ify_ctx);
+
+ frms = txq->frms;
+ tx_desc = TAILQ_FIRST(&txq->head);
+ while (txq->frms) {
+ bytes += qdf_nbuf_len(tx_desc->netbuf);
+ txq->frms--;
+ tx_desc = TAILQ_NEXT(tx_desc, tx_desc_list_elem);
+ }
+	ol_tx_queue_log_free(pdev, txq, tid, frms, bytes);
+	txq->bytes -= bytes;
+ txq->flag = ol_tx_queue_empty;
+ /* txq->head gets reset during the TAILQ_CONCAT call */
+ TAILQ_CONCAT(&tx_tmp_list, &txq->head, tx_desc_list_elem);
+
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+ /* free tx frames without holding tx_queue_spinlock */
+ qdf_atomic_add(frms, &pdev->tx_queue.rsrc_cnt);
+ while (frms) {
+ tx_desc = TAILQ_FIRST(&tx_tmp_list);
+ TAILQ_REMOVE(&tx_tmp_list, tx_desc, tx_desc_list_elem);
+ ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 0);
+ frms--;
+ }
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+
+/*--- queue pause / unpause functions ---------------------------------------*/
+
+/**
+ * ol_txrx_peer_tid_pause_base() - suspend/pause a given peer's txq for a given tid
+ * @pdev: the physical device object
+ * @peer: peer device object
+ * @tid: tid for which queue needs to be paused
+ *
+ * Return: None
+ */
+static void
+ol_txrx_peer_tid_pause_base(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer,
+ int tid)
+{
+ struct ol_tx_frms_queue_t *txq = &peer->txqs[tid];
+
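+	/*
+	 * Pause requests are counted; only the 0 -> 1 transition actually
+	 * pauses the queue, and a matching number of unpause requests is
+	 * needed before the queue runs again.
+	 */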
+ if (txq->paused_count.total++ == 0) {
+ struct ol_tx_sched_notify_ctx_t notify_ctx;
+
+ notify_ctx.event = OL_TX_PAUSE_QUEUE;
+ notify_ctx.txq = txq;
+ notify_ctx.info.ext_tid = tid;
+ ol_tx_sched_notify(pdev, ¬ify_ctx);
+ txq->flag = ol_tx_queue_paused;
+ }
+}
+#ifdef QCA_BAD_PEER_TX_FLOW_CL
+
+/**
+ * ol_txrx_peer_pause_but_no_mgmt_q_base() - suspend/pause all txqs except
+ * management queue for a given peer
+ * @pdev: the physical device object
+ * @peer: peer device object
+ *
+ * Return: None
+ */
+static void
+ol_txrx_peer_pause_but_no_mgmt_q_base(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer)
+{
+ int i;
+ for (i = 0; i < OL_TX_MGMT_TID; i++)
+ ol_txrx_peer_tid_pause_base(pdev, peer, i);
+}
+#endif
+
+
+/**
+ * ol_txrx_peer_pause_base() - suspend/pause all txqs for a given peer
+ * @pdev: the physical device object
+ * @peer: peer device object
+ *
+ * Return: None
+ */
+static void
+ol_txrx_peer_pause_base(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer)
+{
+ int i;
+ for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
+ ol_txrx_peer_tid_pause_base(pdev, peer, i);
+}
+
+/**
+ * ol_txrx_peer_tid_unpause_base() - unpause a given peer's txq for a given tid
+ * @pdev: the physical device object
+ * @peer: peer device object
+ * @tid: tid for which queue needs to be unpaused
+ *
+ * Return: None
+ */
+static void
+ol_txrx_peer_tid_unpause_base(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer,
+ int tid)
+{
+ struct ol_tx_frms_queue_t *txq = &peer->txqs[tid];
+ /*
+ * Don't actually unpause the tx queue until all pause requests
+ * have been removed.
+ */
+ TXRX_ASSERT2(txq->paused_count.total > 0);
+	/* nothing to do if the queue is not currently paused */
+ if (txq->paused_count.total == 0)
+ return;
+
+ if (--txq->paused_count.total == 0) {
+ struct ol_tx_sched_notify_ctx_t notify_ctx;
+
+ notify_ctx.event = OL_TX_UNPAUSE_QUEUE;
+ notify_ctx.txq = txq;
+ notify_ctx.info.ext_tid = tid;
+ ol_tx_sched_notify(pdev, ¬ify_ctx);
+
+ if (txq->frms == 0) {
+ txq->flag = ol_tx_queue_empty;
+ } else {
+ txq->flag = ol_tx_queue_active;
+ /*
+			 * Now that there are new tx frames available to download,
+ * invoke the scheduling function, to see if it wants to
+ * download the new frames.
+ * Since the queue lock is currently held, and since
+ * the scheduler function takes the lock, temporarily
+ * release the lock.
+ */
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+ ol_tx_sched(pdev);
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+ }
+ }
+}
+#ifdef QCA_BAD_PEER_TX_FLOW_CL
+/**
+ * ol_txrx_peer_unpause_but_no_mgmt_q_base() - unpause all txqs except
+ * management queue for a given peer
+ * @pdev: the physical device object
+ * @peer: peer device object
+ *
+ * Return: None
+ */
+static void
+ol_txrx_peer_unpause_but_no_mgmt_q_base(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer)
+{
+ int i;
+ for (i = 0; i < OL_TX_MGMT_TID; i++)
+ ol_txrx_peer_tid_unpause_base(pdev, peer, i);
+}
+#endif
+
+void
+ol_txrx_peer_tid_unpause(ol_txrx_peer_handle peer, int tid)
+{
+ struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
+
+ /* TO DO: log the queue unpause */
+
+ /* acquire the mutex lock, since we'll be modifying the queues */
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+
+ if (tid == -1) {
+ int i;
+ for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
+ ol_txrx_peer_tid_unpause_base(pdev, peer, i);
+
+ } else {
+ ol_txrx_peer_tid_unpause_base(pdev, peer, tid);
+ }
+
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+void
+ol_txrx_throttle_pause(ol_txrx_pdev_handle pdev)
+{
+#if defined(QCA_SUPPORT_TX_THROTTLE)
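+	/* the is_paused flag makes redundant pause requests no-ops */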
+ qdf_spin_lock_bh(&pdev->tx_throttle.mutex);
+
+ if (pdev->tx_throttle.is_paused == true) {
+ qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
+ return;
+ }
+
+ pdev->tx_throttle.is_paused = true;
+ qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
+#endif
+ ol_txrx_pdev_pause(pdev, 0);
+}
+
+void
+ol_txrx_throttle_unpause(ol_txrx_pdev_handle pdev)
+{
+#if defined(QCA_SUPPORT_TX_THROTTLE)
+ qdf_spin_lock_bh(&pdev->tx_throttle.mutex);
+
+ if (pdev->tx_throttle.is_paused == false) {
+ qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
+ return;
+ }
+
+ pdev->tx_throttle.is_paused = false;
+ qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
+#endif
+ ol_txrx_pdev_unpause(pdev, 0);
+}
+
+void
+ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ struct ol_txrx_peer_t *peer;
+ /* TO DO: log the queue pause */
+ /* acquire the mutex lock, since we'll be modifying the queues */
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+
+ /* use peer_ref_mutex before accessing peer_list */
+ qdf_spin_lock_bh(&pdev->peer_ref_mutex);
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+ TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
+ ol_txrx_peer_pause_base(pdev, peer);
+ }
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+ qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+
+void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ struct ol_txrx_peer_t *peer;
+ /* TO DO: log the queue unpause */
+ /* acquire the mutex lock, since we'll be modifying the queues */
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+
+ /* take peer_ref_mutex before accessing peer_list */
+ qdf_spin_lock_bh(&pdev->peer_ref_mutex);
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+
+ TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
+ int i;
+ for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
+ ol_txrx_peer_tid_unpause_base(pdev, peer, i);
+ }
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+ qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
+{
+ ol_tx_queue_vdev_flush(vdev->pdev, vdev);
+}
+
+#ifdef QCA_BAD_PEER_TX_FLOW_CL
+
+/**
+ * ol_txrx_peer_bal_add_limit_peer() - add one peer into limit list
+ * @pdev: Pointer to PDEV structure.
+ * @peer_id: Peer Identifier.
+ * @peer_limit: Peer limit threshold
+ *
+ * Add one peer to the limit list of the pdev.
+ * Note that the peer limit info is also updated.
+ * If this is the first limited peer, start the balance timer.
+ *
+ * Return: None
+ */
+void
+ol_txrx_peer_bal_add_limit_peer(struct ol_txrx_pdev_t *pdev,
+ u_int16_t peer_id, u_int16_t peer_limit)
+{
+ u_int16_t i, existed = 0;
+ struct ol_txrx_peer_t *peer = NULL;
+
+ for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
+ if (pdev->tx_peer_bal.limit_list[i].peer_id == peer_id) {
+ existed = 1;
+ break;
+ }
+ }
+
+ if (!existed) {
+ u_int32_t peer_num = pdev->tx_peer_bal.peer_num;
+		/* Check if peer_num has reached capacity */
+ if (peer_num >= MAX_NO_PEERS_IN_LIMIT) {
+ TX_SCHED_DEBUG_PRINT_ALWAYS(
+ "reach the maxinum peer num %d\n",
+ peer_num);
+ return;
+ }
+ pdev->tx_peer_bal.limit_list[peer_num].peer_id = peer_id;
+ pdev->tx_peer_bal.limit_list[peer_num].limit_flag = true;
+ pdev->tx_peer_bal.limit_list[peer_num].limit = peer_limit;
+ pdev->tx_peer_bal.peer_num++;
+
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+ if (peer) {
+ peer->tx_limit_flag = true;
+ peer->tx_limit = peer_limit;
+ }
+
+ TX_SCHED_DEBUG_PRINT_ALWAYS(
+ "Add one peer into limit queue, peer_id %d, cur peer num %d\n",
+ peer_id,
+ pdev->tx_peer_bal.peer_num);
+ }
+
+ /* Only start the timer once */
+ if (pdev->tx_peer_bal.peer_bal_timer_state ==
+ ol_tx_peer_bal_timer_inactive) {
+ qdf_timer_start(&pdev->tx_peer_bal.peer_bal_timer,
+ pdev->tx_peer_bal.peer_bal_period_ms);
+ pdev->tx_peer_bal.peer_bal_timer_state =
+ ol_tx_peer_bal_timer_active;
+ }
+}
+
+/**
+ * ol_txrx_peer_bal_remove_limit_peer() - remove one peer from limit list
+ * @pdev: Pointer to PDEV structure.
+ * @peer_id: Peer Identifier.
+ *
+ * Remove one peer from the limit list of the pdev.
+ * Note that the timer is only stopped if no peer remains in the limit state.
+ *
+ * Return: None
+ */
+void
+ol_txrx_peer_bal_remove_limit_peer(struct ol_txrx_pdev_t *pdev,
+ u_int16_t peer_id)
+{
+ u_int16_t i;
+ struct ol_txrx_peer_t *peer = NULL;
+
+ for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
+ if (pdev->tx_peer_bal.limit_list[i].peer_id == peer_id) {
+ pdev->tx_peer_bal.limit_list[i] =
+ pdev->tx_peer_bal.limit_list[
+ pdev->tx_peer_bal.peer_num - 1];
+ pdev->tx_peer_bal.peer_num--;
+
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+ if (peer)
+ peer->tx_limit_flag = false;
+
+ TX_SCHED_DEBUG_PRINT(
+ "Remove one peer from limitq, peer_id %d, cur peer num %d\n",
+ peer_id,
+ pdev->tx_peer_bal.peer_num);
+ break;
+ }
+ }
+
+ /* Only stop the timer if no peer in limit state */
+ if (pdev->tx_peer_bal.peer_num == 0) {
+ qdf_timer_stop(&pdev->tx_peer_bal.peer_bal_timer);
+ pdev->tx_peer_bal.peer_bal_timer_state =
+ ol_tx_peer_bal_timer_inactive;
+ }
+}
+
+void
+ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer)
+{
+ struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
+
+ /* TO DO: log the queue pause */
+
+ /* acquire the mutex lock, since we'll be modifying the queues */
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+
+ ol_txrx_peer_pause_but_no_mgmt_q_base(pdev, peer);
+
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+void
+ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer)
+{
+ struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
+
+ /* TO DO: log the queue pause */
+
+ /* acquire the mutex lock, since we'll be modifying the queues */
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+
+ ol_txrx_peer_unpause_but_no_mgmt_q_base(pdev, peer);
+
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+u_int16_t
+ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
+ u_int16_t max_frames,
+ u_int16_t *tx_limit_flag)
+{
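+	/*
+	 * Clamp the dequeue burst to the peer's remaining tx limit when
+	 * the peer is being rate-limited by the bad-peer flow control.
+	 */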
+ if (txq && (txq->peer) && (txq->peer->tx_limit_flag) &&
+ (txq->peer->tx_limit < max_frames)) {
+ TX_SCHED_DEBUG_PRINT(
+ "Peer ID %d goes to limit, threshold is %d\n",
+ txq->peer->peer_ids[0], txq->peer->tx_limit);
+ *tx_limit_flag = 1;
+ return txq->peer->tx_limit;
+ } else {
+ return max_frames;
+ }
+}
+
+void
+ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ u_int16_t frames,
+ u_int16_t tx_limit_flag)
+{
+ qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
+ if (txq && tx_limit_flag && (txq->peer) &&
+ (txq->peer->tx_limit_flag)) {
+ if (txq->peer->tx_limit < frames)
+ txq->peer->tx_limit = 0;
+ else
+ txq->peer->tx_limit -= frames;
+
+ TX_SCHED_DEBUG_PRINT_ALWAYS(
+ "Peer ID %d in limit, deque %d frms\n",
+ txq->peer->peer_ids[0], frames);
+	} else if (txq && txq->peer) {
+ TX_SCHED_DEBUG_PRINT("Download peer_id %d, num_frames %d\n",
+ txq->peer->peer_ids[0], frames);
+ }
+ qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
+}
+
+void
+ol_txrx_bad_peer_txctl_set_setting(struct ol_txrx_pdev_t *pdev,
+ int enable, int period, int txq_limit)
+{
+ if (enable)
+ pdev->tx_peer_bal.enabled = ol_tx_peer_bal_enable;
+ else
+ pdev->tx_peer_bal.enabled = ol_tx_peer_bal_disable;
+
+	/* Set the current settings */
+ pdev->tx_peer_bal.peer_bal_period_ms = period;
+ pdev->tx_peer_bal.peer_bal_txq_limit = txq_limit;
+}
+
+void
+ol_txrx_bad_peer_txctl_update_threshold(struct ol_txrx_pdev_t *pdev,
+ int level, int tput_thresh,
+ int tx_limit)
+{
+	/* Set the current settings */
+ pdev->tx_peer_bal.ctl_thresh[level].tput_thresh =
+ tput_thresh;
+ pdev->tx_peer_bal.ctl_thresh[level].tx_limit =
+ tx_limit;
+}
+
+/**
+ * ol_tx_pdev_peer_bal_timer() - timer function
+ * @context: context of timer function
+ *
+ * Return: None
+ */
+void
+ol_tx_pdev_peer_bal_timer(void *context)
+{
+ int i;
+ struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
+
+ qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
+
+ for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
+ if (pdev->tx_peer_bal.limit_list[i].limit_flag) {
+ u_int16_t peer_id =
+ pdev->tx_peer_bal.limit_list[i].peer_id;
+ u_int16_t tx_limit =
+ pdev->tx_peer_bal.limit_list[i].limit;
+
+ struct ol_txrx_peer_t *peer = NULL;
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+ TX_SCHED_DEBUG_PRINT(
+ "%s peer_id %d peer = 0x%x tx limit %d\n",
+ __func__, peer_id,
+ (int)peer, tx_limit);
+
+			/*
+			 * It is possible the peer limit is still not 0,
+			 * but that scenario does not need to be handled here.
+			 */
+ if (peer) {
+ peer->tx_limit = tx_limit;
+ } else {
+ ol_txrx_peer_bal_remove_limit_peer(pdev,
+ peer_id);
+ TX_SCHED_DEBUG_PRINT_ALWAYS(
+ "No such a peer, peer id = %d\n",
+ peer_id);
+ }
+ }
+ }
+
+ qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
+
+ if (pdev->tx_peer_bal.peer_num) {
+ ol_tx_sched(pdev);
+ qdf_timer_start(&pdev->tx_peer_bal.peer_bal_timer,
+ pdev->tx_peer_bal.peer_bal_period_ms);
+ }
+}
+
+void
+ol_txrx_set_txq_peer(
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_txrx_peer_t *peer)
+{
+ if (txq)
+ txq->peer = peer;
+}
+
+void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev)
+{
+ u_int32_t timer_period;
+
+ qdf_spinlock_create(&pdev->tx_peer_bal.mutex);
+ pdev->tx_peer_bal.peer_num = 0;
+ pdev->tx_peer_bal.peer_bal_timer_state
+ = ol_tx_peer_bal_timer_inactive;
+
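+	/* default peer-balance evaluation period, in ms */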
+ timer_period = 2000;
+ pdev->tx_peer_bal.peer_bal_period_ms = timer_period;
+
+ qdf_timer_init(
+ pdev->osdev,
+ &pdev->tx_peer_bal.peer_bal_timer,
+ ol_tx_pdev_peer_bal_timer,
+ pdev, QDF_TIMER_TYPE_SW);
+}
+
+void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev)
+{
+ qdf_timer_stop(&pdev->tx_peer_bal.peer_bal_timer);
+ pdev->tx_peer_bal.peer_bal_timer_state =
+ ol_tx_peer_bal_timer_inactive;
+ qdf_timer_free(&pdev->tx_peer_bal.peer_bal_timer);
+ qdf_spinlock_destroy(&pdev->tx_peer_bal.mutex);
+}
+
+void
+ol_txrx_peer_link_status_handler(
+ ol_txrx_pdev_handle pdev,
+ u_int16_t peer_num,
+ struct rate_report_t *peer_link_status)
+{
+ u_int16_t i = 0;
+ struct ol_txrx_peer_t *peer = NULL;
+
+ if (NULL == pdev) {
+ TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL pdev handler\n");
+ return;
+ }
+
+ if (NULL == peer_link_status) {
+ TX_SCHED_DEBUG_PRINT_ALWAYS(
+ "Error:NULL link report message. peer num %d\n",
+ peer_num);
+ return;
+ }
+
+ /* Check if bad peer tx flow CL is enabled */
+ if (pdev->tx_peer_bal.enabled != ol_tx_peer_bal_enable) {
+ TX_SCHED_DEBUG_PRINT_ALWAYS(
+ "Bad peer tx flow CL is not enabled, ignore it\n");
+ return;
+ }
+
+ /* Check peer_num is reasonable */
+ if (peer_num > MAX_NO_PEERS_IN_LIMIT) {
+ TX_SCHED_DEBUG_PRINT_ALWAYS(
+ "%s: Bad peer_num %d\n", __func__, peer_num);
+ return;
+ }
+
+ TX_SCHED_DEBUG_PRINT_ALWAYS("%s: peer_num %d\n", __func__, peer_num);
+
+ for (i = 0; i < peer_num; i++) {
+ u_int16_t peer_limit, peer_id;
+ u_int16_t pause_flag, unpause_flag;
+ u_int32_t peer_phy, peer_tput;
+
+ peer_id = peer_link_status->id;
+ peer_phy = peer_link_status->phy;
+ peer_tput = peer_link_status->rate;
+
+ TX_SCHED_DEBUG_PRINT("%s: peer id %d tput %d phy %d\n",
+ __func__, peer_id, peer_tput, peer_phy);
+
+ /* Sanity check for the PHY mode value */
+ if (peer_phy > TXRX_IEEE11_AC) {
+ TX_SCHED_DEBUG_PRINT_ALWAYS(
+ "%s: PHY value is illegal: %d, and the peer_id %d\n",
+ __func__, peer_link_status->phy, peer_id);
+ continue;
+ }
+ pause_flag = false;
+ unpause_flag = false;
+ peer_limit = 0;
+
+ /* From now on, PHY, PER info should be all fine */
+ qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
+
+ /* Update link status analysis for each peer */
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+ if (peer) {
+ u_int32_t thresh, limit, phy;
+ phy = peer_link_status->phy;
+ thresh = pdev->tx_peer_bal.ctl_thresh[phy].tput_thresh;
+ limit = pdev->tx_peer_bal.ctl_thresh[phy].tx_limit;
+
+ if (((peer->tx_pause_flag) || (peer->tx_limit_flag)) &&
+ (peer_tput) && (peer_tput < thresh))
+ peer_limit = limit;
+
+ if (peer_limit) {
+ ol_txrx_peer_bal_add_limit_peer(pdev, peer_id,
+ peer_limit);
+ } else if (pdev->tx_peer_bal.peer_num) {
+ TX_SCHED_DEBUG_PRINT(
+ "%s: Check if peer_id %d exit limit\n",
+ __func__, peer_id);
+ ol_txrx_peer_bal_remove_limit_peer(pdev,
+ peer_id);
+ }
+ if ((peer_tput == 0) &&
+ (peer->tx_pause_flag == false)) {
+ peer->tx_pause_flag = true;
+ pause_flag = true;
+ } else if (peer->tx_pause_flag) {
+ unpause_flag = true;
+ peer->tx_pause_flag = false;
+ }
+ } else {
+ TX_SCHED_DEBUG_PRINT(
+ "%s: Remove peer_id %d from limit list\n",
+ __func__, peer_id);
+ ol_txrx_peer_bal_remove_limit_peer(pdev, peer_id);
+ }
+
+ peer_link_status++;
+ qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
+ if (pause_flag)
+ ol_txrx_peer_pause_but_no_mgmt_q(peer);
+ else if (unpause_flag)
+ ol_txrx_peer_unpause_but_no_mgmt_q(peer);
+ }
+}
+#endif /* QCA_BAD_PEER_TX_FLOW_CL */
+
+/*--- ADDBA triggering functions --------------------------------------------*/
+
+
+/*=== debug functions =======================================================*/
+
+/*--- queue event log -------------------------------------------------------*/
+
+#if defined(DEBUG_HL_LOGGING)
+
+#define negative_sign -1
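+/*
+ * A negative return value from ol_tx_queue_log_record_display() signals
+ * that the log wrapped and the display should resume from offset 0.
+ */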
+
+/**
+ * ol_tx_queue_log_entry_type_info() - get the size and alignment of a log entry
+ * @type: log entry type
+ * @size: size
+ * @align: alignment
+ * @var_size: variable size record
+ *
+ * Return: None
+ */
+static void
+ol_tx_queue_log_entry_type_info(
+ u_int8_t *type, int *size, int *align, int var_size)
+{
+ switch (*type) {
+ case ol_tx_log_entry_type_enqueue:
+ case ol_tx_log_entry_type_dequeue:
+ case ol_tx_log_entry_type_queue_free:
+ *size = sizeof(struct ol_tx_log_queue_add_t);
+ *align = 2;
+ break;
+
+ case ol_tx_log_entry_type_queue_state:
+ *size = offsetof(struct ol_tx_log_queue_state_var_sz_t, data);
+ *align = 4;
+ if (var_size) {
+ /* read the variable-sized record,
+ * to see how large it is
+ */
+ int align_pad;
+ struct ol_tx_log_queue_state_var_sz_t *record;
+
+ align_pad =
+				(*align - ((((unsigned long) type) + 1)))
+ & (*align - 1);
+ record = (struct ol_tx_log_queue_state_var_sz_t *)
+ (type + 1 + align_pad);
+ *size += record->num_cats_active *
+ (sizeof(u_int32_t) /* bytes */ +
+ sizeof(u_int16_t) /* frms */);
+ }
+ break;
+
+ /*case ol_tx_log_entry_type_drop:*/
+ default:
+ *size = 0;
+ *align = 0;
+ };
+}
+
+/**
+ * ol_tx_queue_log_oldest_update() - advance the oldest-record offset to make room
+ * @pdev: pointer to txrx handle
+ * @offset: offset value
+ *
+ * Return: None
+ */
+static void
+ol_tx_queue_log_oldest_update(struct ol_txrx_pdev_t *pdev, int offset)
+{
+ int oldest_record_offset;
+
+ /*
+ * If the offset of the oldest record is between the current and
+ * new values of the offset of the newest record, then the oldest
+ * record has to be dropped from the log to provide room for the
+ * newest record.
+ * Advance the offset of the oldest record until it points to a
+ * record that is beyond the new value of the offset of the newest
+ * record.
+ */
+ if (!pdev->txq_log.wrapped)
+ /*
+ * The log has not even filled up yet - no need to remove
+ * the oldest record to make room for a new record.
+ */
+ return;
+
+ if (offset > pdev->txq_log.offset) {
+ /*
+ * not wraparound -
+ * The oldest record offset may have already wrapped around,
+ * even if the newest record has not. In this case, then
+ * the oldest record offset is fine where it is.
+ */
+ if (pdev->txq_log.oldest_record_offset == 0)
+ return;
+
+ oldest_record_offset = pdev->txq_log.oldest_record_offset;
+ } else
+ /* wraparound */
+ oldest_record_offset = 0;
+
+ while (oldest_record_offset < offset) {
+ int size, align, align_pad;
+ u_int8_t type;
+
+ type = pdev->txq_log.data[oldest_record_offset];
+ if (type == ol_tx_log_entry_type_wrap) {
+ oldest_record_offset = 0;
+ break;
+ }
+ ol_tx_queue_log_entry_type_info(
+ &pdev->txq_log.data[oldest_record_offset],
+ &size, &align, 1);
+ align_pad =
+ (align - ((oldest_record_offset + 1/*type*/)))
+ & (align - 1);
+ /*
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
+ "TXQ LOG old alloc: offset %d, type %d, size %d (%d)\n",
+ oldest_record_offset, type, size, size + 1 + align_pad);
+ */
+ oldest_record_offset += size + 1 + align_pad;
+ }
+ if (oldest_record_offset >= pdev->txq_log.size)
+ oldest_record_offset = 0;
+
+ pdev->txq_log.oldest_record_offset = oldest_record_offset;
+}
+
+/**
+ * ol_tx_queue_log_alloc() - log data allocation
+ * @pdev: physical device object
+ * @type: ol_tx_log_entry_type
+ * @extra_bytes: extra bytes
+ *
+ *
+ * Return: log element
+ */
+void *
+ol_tx_queue_log_alloc(
+ struct ol_txrx_pdev_t *pdev,
+ u_int8_t type /* ol_tx_log_entry_type */,
+ int extra_bytes)
+{
+ int size, align, align_pad;
+ int offset;
+
+ ol_tx_queue_log_entry_type_info(&type, &size, &align, 0);
+ size += extra_bytes;
+
+ offset = pdev->txq_log.offset;
+ align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
+
+ if (pdev->txq_log.size - offset >= size + 1 + align_pad)
+ /* no need to wrap around */
+ goto alloc_found;
+
+ if (!pdev->txq_log.allow_wrap)
+ return NULL; /* log is full and can't wrap */
+
+ /* handle wrap-around */
+ pdev->txq_log.wrapped = 1;
+ offset = 0;
+ align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
+ /* sanity check that the log is large enough to hold this entry */
+ if (pdev->txq_log.size <= size + 1 + align_pad)
+ return NULL;
+
+alloc_found:
+ ol_tx_queue_log_oldest_update(pdev, offset + size + 1 + align_pad);
+ if (offset == 0)
+ pdev->txq_log.data[pdev->txq_log.offset] =
+ ol_tx_log_entry_type_wrap;
+
+ /*
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
+ "TXQ LOG new alloc: offset %d, type %d, size %d (%d)\n",
+ offset, type, size, size + 1 + align_pad);
+ */
+ pdev->txq_log.data[offset] = type;
+ pdev->txq_log.offset = offset + size + 1 + align_pad;
+ if (pdev->txq_log.offset >= pdev->txq_log.size) {
+ pdev->txq_log.offset = 0;
+ pdev->txq_log.wrapped = 1;
+ }
+ return &pdev->txq_log.data[offset + 1 + align_pad];
+}
+
+/**
+ * ol_tx_queue_log_record_display() - show log record of tx queue
+ * @pdev: pointer to txrx handle
+ * @offset: offset value
+ *
+ * Return: size of record
+ */
+static int
+ol_tx_queue_log_record_display(struct ol_txrx_pdev_t *pdev, int offset)
+{
+ int size, align, align_pad;
+ u_int8_t type;
+ struct ol_txrx_peer_t *peer;
+
+ qdf_spin_lock_bh(&pdev->txq_log_spinlock);
+ type = pdev->txq_log.data[offset];
+ ol_tx_queue_log_entry_type_info(
+ &pdev->txq_log.data[offset], &size, &align, 1);
+ align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
+
+ switch (type) {
+ case ol_tx_log_entry_type_enqueue:
+ {
+ struct ol_tx_log_queue_add_t record;
+ qdf_mem_copy(&record,
+ &pdev->txq_log.data[offset + 1 + align_pad],
+ sizeof(struct ol_tx_log_queue_add_t));
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+
+ if (record.peer_id != 0xffff) {
+ peer = ol_txrx_peer_find_by_id(pdev,
+ record.peer_id);
+ if (peer != NULL)
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "Q: %6d %5d %3d %4d (%02x:%02x:%02x:%02x:%02x:%02x)",
+ record.num_frms, record.num_bytes,
+ record.tid,
+ record.peer_id,
+ peer->mac_addr.raw[0],
+ peer->mac_addr.raw[1],
+ peer->mac_addr.raw[2],
+ peer->mac_addr.raw[3],
+ peer->mac_addr.raw[4],
+ peer->mac_addr.raw[5]);
+ else
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "Q: %6d %5d %3d %4d",
+ record.num_frms, record.num_bytes,
+ record.tid, record.peer_id);
+ } else {
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_INFO,
+ "Q: %6d %5d %3d from vdev",
+ record.num_frms, record.num_bytes,
+ record.tid);
+ }
+ break;
+ }
+ case ol_tx_log_entry_type_dequeue:
+ {
+ struct ol_tx_log_queue_add_t record;
+ qdf_mem_copy(&record,
+ &pdev->txq_log.data[offset + 1 + align_pad],
+ sizeof(struct ol_tx_log_queue_add_t));
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+
+ if (record.peer_id != 0xffff) {
+ peer = ol_txrx_peer_find_by_id(pdev, record.peer_id);
+ if (peer != NULL)
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "DQ: %6d %5d %3d %4d (%02x:%02x:%02x:%02x:%02x:%02x)",
+ record.num_frms, record.num_bytes,
+ record.tid,
+ record.peer_id,
+ peer->mac_addr.raw[0],
+ peer->mac_addr.raw[1],
+ peer->mac_addr.raw[2],
+ peer->mac_addr.raw[3],
+ peer->mac_addr.raw[4],
+ peer->mac_addr.raw[5]);
+ else
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "DQ: %6d %5d %3d %4d",
+ record.num_frms, record.num_bytes,
+ record.tid, record.peer_id);
+ } else {
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_INFO,
+ "DQ: %6d %5d %3d from vdev",
+ record.num_frms, record.num_bytes,
+ record.tid);
+ }
+ break;
+ }
+ case ol_tx_log_entry_type_queue_free:
+ {
+ struct ol_tx_log_queue_add_t record;
+ qdf_mem_copy(&record,
+ &pdev->txq_log.data[offset + 1 + align_pad],
+ sizeof(struct ol_tx_log_queue_add_t));
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+
+ if (record.peer_id != 0xffff) {
+ peer = ol_txrx_peer_find_by_id(pdev, record.peer_id);
+ if (peer != NULL)
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "F: %6d %5d %3d %4d (%02x:%02x:%02x:%02x:%02x:%02x)",
+ record.num_frms, record.num_bytes,
+ record.tid,
+ record.peer_id,
+ peer->mac_addr.raw[0],
+ peer->mac_addr.raw[1],
+ peer->mac_addr.raw[2],
+ peer->mac_addr.raw[3],
+ peer->mac_addr.raw[4],
+ peer->mac_addr.raw[5]);
+ else
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "F: %6d %5d %3d %4d",
+ record.num_frms, record.num_bytes,
+ record.tid, record.peer_id);
+ } else {
+ /* shouldn't happen */
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_INFO,
+ "Unexpected vdev queue removal\n");
+ }
+ break;
+ }
+
+ case ol_tx_log_entry_type_queue_state:
+ {
+ int i, j;
+ u_int32_t active_bitmap;
+ struct ol_tx_log_queue_state_var_sz_t record;
+ u_int8_t *data;
+
+ qdf_mem_copy(&record,
+ &pdev->txq_log.data[offset + 1 + align_pad],
+ sizeof(struct ol_tx_log_queue_state_var_sz_t));
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "S: bitmap = %#x",
+ record.active_bitmap);
+ data = &record.data[0];
+ j = 0;
+ i = 0;
+ active_bitmap = record.active_bitmap;
+ while (active_bitmap) {
+ if (active_bitmap & 0x1) {
+ u_int16_t frms;
+ u_int32_t bytes;
+
+ frms = data[0] | (data[1] << 8);
+ bytes = (data[2] << 0) | (data[3] << 8) |
+ (data[4] << 16) | (data[5] << 24);
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "cat %2d: %6d %5d",
+ i, frms, bytes);
+ data += 6;
+ j++;
+ }
+ i++;
+ active_bitmap >>= 1;
+ }
+ break;
+ }
+
+ /*case ol_tx_log_entry_type_drop:*/
+
+ case ol_tx_log_entry_type_wrap:
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+ return negative_sign * offset; /* go back to the top */
+
+ default:
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "*** invalid tx log entry type (%d)\n", type);
+ return 0; /* error */
+ };
+
+ return size + 1 + align_pad;
+}
+
+/**
+ * ol_tx_queue_log_display() - show tx queue log
+ * @pdev: pointer to txrx handle
+ *
+ * Return: None
+ */
+void
+ol_tx_queue_log_display(struct ol_txrx_pdev_t *pdev)
+{
+ int offset;
+ int unwrap;
+
+ qdf_spin_lock_bh(&pdev->txq_log_spinlock);
+ offset = pdev->txq_log.oldest_record_offset;
+ unwrap = pdev->txq_log.wrapped;
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+ /*
+ * In theory, this should use mutex to guard against the offset
+ * being changed while in use, but since this is just for debugging,
+ * don't bother.
+ */
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "Tx queue log:");
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ ": Frames Bytes TID PEER");
+
+ while (unwrap || offset != pdev->txq_log.offset) {
+ int delta = ol_tx_queue_log_record_display(pdev, offset);
+ if (delta == 0)
+ return; /* error */
+
+ if (delta < 0)
+ unwrap = 0;
+
+ offset += delta;
+ }
+}
+
+void
+ol_tx_queue_log_enqueue(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_msdu_info_t *msdu_info,
+ int frms, int bytes)
+{
+ int tid;
+ u_int16_t peer_id = msdu_info->htt.info.peer_id;
+ struct ol_tx_log_queue_add_t *log_elem;
+ tid = msdu_info->htt.info.ext_tid;
+
+ qdf_spin_lock_bh(&pdev->txq_log_spinlock);
+ log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_enqueue, 0);
+ if (!log_elem) {
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+ return;
+ }
+
+ log_elem->num_frms = frms;
+ log_elem->num_bytes = bytes;
+ log_elem->peer_id = peer_id;
+ log_elem->tid = tid;
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+}
+
+void
+ol_tx_queue_log_dequeue(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int frms, int bytes)
+{
+ int ext_tid;
+ u_int16_t peer_id;
+ struct ol_tx_log_queue_add_t *log_elem;
+
+ ext_tid = txq->ext_tid;
+ qdf_spin_lock_bh(&pdev->txq_log_spinlock);
+ log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_dequeue, 0);
+ if (!log_elem) {
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+ return;
+ }
+
+ if (ext_tid < OL_TX_NUM_TIDS) {
+ struct ol_txrx_peer_t *peer;
+ struct ol_tx_frms_queue_t *txq_base;
+
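+		/*
+		 * txq is &peer->txqs[ext_tid]; step back to txqs[0] and
+		 * recover the owning peer via container_of.
+		 */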
+ txq_base = txq - ext_tid;
+ peer = container_of(txq_base, struct ol_txrx_peer_t, txqs[0]);
+ peer_id = peer->peer_ids[0];
+ } else {
+ peer_id = ~0;
+ }
+
+ log_elem->num_frms = frms;
+ log_elem->num_bytes = bytes;
+ log_elem->peer_id = peer_id;
+ log_elem->tid = ext_tid;
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+}
+
+void
+ol_tx_queue_log_free(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid, int frms, int bytes)
+{
+ u_int16_t peer_id;
+ struct ol_tx_log_queue_add_t *log_elem;
+
+ qdf_spin_lock_bh(&pdev->txq_log_spinlock);
+ log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_queue_free,
+ 0);
+ if (!log_elem) {
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+ return;
+ }
+
+ if (tid < OL_TX_NUM_TIDS) {
+ struct ol_txrx_peer_t *peer;
+ struct ol_tx_frms_queue_t *txq_base;
+
+ txq_base = txq - tid;
+ peer = container_of(txq_base, struct ol_txrx_peer_t, txqs[0]);
+ peer_id = peer->peer_ids[0];
+ } else {
+ peer_id = ~0;
+ }
+
+ log_elem->num_frms = frms;
+ log_elem->num_bytes = bytes;
+ log_elem->peer_id = peer_id;
+ log_elem->tid = tid;
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+}
+
+void
+ol_tx_queue_log_sched(
+ struct ol_txrx_pdev_t *pdev,
+ int credit,
+ int *num_cats,
+ u_int32_t **active_bitmap,
+ u_int8_t **data)
+{
+ int data_size;
+ struct ol_tx_log_queue_state_var_sz_t *log_elem;
+
+ data_size = sizeof(u_int32_t) /* bytes */ +
+ sizeof(u_int16_t) /* frms */;
+ data_size *= *num_cats;
+
+ qdf_spin_lock_bh(&pdev->txq_log_spinlock);
+ log_elem = ol_tx_queue_log_alloc(
+ pdev, ol_tx_log_entry_type_queue_state, data_size);
+ if (!log_elem) {
+ *num_cats = 0;
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+ return;
+ }
+ log_elem->num_cats_active = *num_cats;
+ log_elem->active_bitmap = 0;
+ log_elem->credit = credit;
+
+ *active_bitmap = &log_elem->active_bitmap;
+ *data = &log_elem->data[0];
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+}
+
+/**
+ * ol_tx_queue_log_clear() - clear tx queue log
+ * @pdev: pointer to txrx handle
+ *
+ * Return: None
+ */
+void
+ol_tx_queue_log_clear(struct ol_txrx_pdev_t *pdev)
+{
+ qdf_spin_lock_bh(&pdev->txq_log_spinlock);
+ qdf_mem_zero(&pdev->txq_log, sizeof(pdev->txq_log));
+ pdev->txq_log.size = OL_TXQ_LOG_SIZE;
+ pdev->txq_log.oldest_record_offset = 0;
+ pdev->txq_log.offset = 0;
+ pdev->txq_log.allow_wrap = 1;
+ pdev->txq_log.wrapped = 0;
+ qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
+}
+#endif /* defined(DEBUG_HL_LOGGING) */
+
+/*--- queue state printouts -------------------------------------------------*/
+
+#if TXRX_DEBUG_LEVEL > 5
+
+/**
+ * ol_tx_queue_display() - show tx queue info
+ * @txq: pointer to txq frames
+ * @indent: indent
+ *
+ * Return: None
+ */
+void
+ol_tx_queue_display(struct ol_tx_frms_queue_t *txq, int indent)
+{
+ char *state;
+
+ state = (txq->flag == ol_tx_queue_active) ? "active" : "paused";
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
+ "%*stxq %p (%s): %d frms, %d bytes\n",
+ indent, " ", txq, state, txq->frms, txq->bytes);
+}
+
+void
+ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_txrx_vdev_t *vdev;
+
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
+ "pdev %p tx queues:\n", pdev);
+ TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+ struct ol_txrx_peer_t *peer;
+ int i;
+ for (i = 0; i < QDF_ARRAY_SIZE(vdev->txqs); i++) {
+ if (vdev->txqs[i].frms == 0)
+ continue;
+
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
+ "vdev %d (%p), txq %d\n", vdev->vdev_id,
+ vdev, i);
+ ol_tx_queue_display(&vdev->txqs[i], 4);
+ }
+ TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
+ for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
+ if (peer->txqs[i].frms == 0)
+ continue;
+
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_INFO_LOW,
+ "peer %d (%p), txq %d\n",
+ peer->peer_ids[0], vdev, i);
+ ol_tx_queue_display(&peer->txqs[i], 6);
+ }
+ }
+ }
+}
+#endif
+
+#endif /* defined(CONFIG_HL_SUPPORT) */
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
@@ -141,12 +1740,14 @@
vdev->ll_pause.txq.depth = 0;
qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}
-#else /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */
+#endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */
+
+#if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
void ol_txrx_vdev_flush(ol_txrx_vdev_handle data_vdev)
{
return;
}
-#endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */
+#endif
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
@@ -231,6 +1832,9 @@
}
#endif
+#endif
+
+#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
/**
* ol_txrx_pdev_pause() - pause network queues for each vdev
@@ -344,6 +1948,7 @@
/* Traffic is stopped */
TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
"throttle phase --> OFF\n");
+ ol_txrx_throttle_pause(pdev);
ol_txrx_thermal_pause(pdev);
cur_level = pdev->tx_throttle.current_throttle_level;
cur_phase = pdev->tx_throttle.current_throttle_phase;
@@ -359,6 +1964,7 @@
/* Traffic can go */
TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
"throttle phase --> ON\n");
+ ol_txrx_throttle_unpause(pdev);
ol_txrx_thermal_unpause(pdev);
cur_level = pdev->tx_throttle.current_throttle_level;
cur_phase = pdev->tx_throttle.current_throttle_phase;
@@ -381,6 +1987,55 @@
}
#endif
+#ifdef CONFIG_HL_SUPPORT
+
+/**
+ * ol_tx_set_throttle_phase_time() - set the thermal mitigation throttle phase
+ * and time
+ * @pdev: the physical device object
+ * @level: throttle level
+ * @ms: pointer filled with the phase duration in ms
+ *
+ * Return: None
+ */
+static void
+ol_tx_set_throttle_phase_time(struct ol_txrx_pdev_t *pdev, int level, int *ms)
+{
+ qdf_timer_stop(&pdev->tx_throttle.phase_timer);
+
+ /* Set the phase */
+ if (level != THROTTLE_LEVEL_0) {
+ pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
+ *ms = pdev->tx_throttle.throttle_time_ms[level]
+ [THROTTLE_PHASE_OFF];
+
+ /* pause all */
+ ol_txrx_throttle_pause(pdev);
+ } else {
+ pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_ON;
+ *ms = pdev->tx_throttle.throttle_time_ms[level]
+ [THROTTLE_PHASE_ON];
+
+ /* unpause all */
+ ol_txrx_throttle_unpause(pdev);
+ }
+}
+#else
+
+static void
+ol_tx_set_throttle_phase_time(struct ol_txrx_pdev_t *pdev, int level, int *ms)
+{
+ /* Reset the phase */
+ pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
+
+ /* Start with the new time */
+ *ms = pdev->tx_throttle.
+ throttle_time_ms[level][THROTTLE_PHASE_OFF];
+
+ qdf_timer_stop(&pdev->tx_throttle.phase_timer);
+}
+#endif
+
void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level)
{
int ms = 0;
@@ -397,15 +2052,7 @@
/* Set the current throttle level */
pdev->tx_throttle.current_throttle_level = (enum throttle_level) level;
- /* Reset the phase */
- pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
- ol_txrx_thermal_unpause(pdev);
-
- /* Start with the new time */
- ms = pdev->tx_throttle.
- throttle_time_ms[level][THROTTLE_PHASE_OFF];
-
- qdf_timer_stop(&pdev->tx_throttle.phase_timer);
+ ol_tx_set_throttle_phase_time(pdev, level, &ms);
if (level != THROTTLE_LEVEL_0)
qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
@@ -435,6 +2082,7 @@
pdev->tx_throttle.
throttle_time_ms[i][THROTTLE_PHASE_ON]);
}
+
}
void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev)
@@ -470,4 +2118,176 @@
pdev->tx_throttle.tx_threshold = THROTTLE_TX_THRESHOLD;
}
#endif /* QCA_SUPPORT_TX_THROTTLE */
+
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+/**
+ * ol_tx_vdev_has_tx_queue_group() - check whether a vdev belongs to a txq group
+ * @group: pointer to the tx queue group
+ * @vdev_id: vdev id
+ *
+ * Return: true if the vdev is a member of the txq group
+ */
+static bool
+ol_tx_vdev_has_tx_queue_group(
+ struct ol_tx_queue_group_t *group,
+ u_int8_t vdev_id)
+{
+ u_int16_t vdev_bitmap;
+ vdev_bitmap = OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
+ if (OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_bitmap, vdev_id))
+ return true;
+
+ return false;
+}
+
+/**
+ * ol_tx_ac_has_tx_queue_group() - check whether an access category belongs
+ * to a txq group
+ * @group: pointer to the tx queue group
+ * @ac: access category
+ *
+ * Return: true if the access category is a member of the txq group
+ */
+static bool
+ol_tx_ac_has_tx_queue_group(
+ struct ol_tx_queue_group_t *group,
+ u_int8_t ac)
+{
+ u_int16_t ac_bitmap;
+ ac_bitmap = OL_TXQ_GROUP_AC_MASK_GET(group->membership);
+ if (OL_TXQ_GROUP_AC_BIT_MASK_GET(ac_bitmap, ac))
+ return true;
+
+ return false;
+}
+
+u_int32_t ol_tx_txq_group_credit_limit(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ u_int32_t credit)
+{
+ u_int8_t i;
+ int updated_credit = credit;
+ /*
+ * If this tx queue belongs to a group, check whether the group's
+ * credit limit is more stringent than the global credit limit.
+ */
+ for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
+ if (txq->group_ptrs[i]) {
+ int group_credit;
+ group_credit = qdf_atomic_read(
+ &txq->group_ptrs[i]->credit);
+ updated_credit = QDF_MIN(updated_credit, group_credit);
+ }
+ }
+
+ credit = (updated_credit < 0) ? 0 : updated_credit;
+
+ return credit;
+}
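
As a concrete walk-through of the clamping above (illustrative values only): with a global credit of 20 and the queue belonging to two groups holding 8 and 15 credits, the queue may consume at most 8, and a negative group balance clamps the result to 0.

    u_int32_t credit = 20;           /* global credit available */
    int group_a = 8, group_b = 15;   /* per-group credit balances */
    int updated = QDF_MIN((int)credit, QDF_MIN(group_a, group_b));

    credit = (updated < 0) ? 0 : updated;   /* credit is now 8 */
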
+
+void ol_tx_txq_group_credit_update(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int32_t credit,
+ u_int8_t absolute)
+{
+ u_int8_t i;
+ /*
+ * If this tx queue belongs to a group then
+ * update group credit
+ */
+ for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
+ if (txq->group_ptrs[i])
+ ol_txrx_update_group_credit(txq->group_ptrs[i],
+ credit, absolute);
+ }
+ ol_tx_update_group_credit_stats(pdev);
+}
+
+void
+ol_tx_set_vdev_group_ptr(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t vdev_id,
+ struct ol_tx_queue_group_t *grp_ptr)
+{
+ struct ol_txrx_vdev_t *vdev = NULL;
+ struct ol_txrx_peer_t *peer = NULL;
+
+ TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+ if (vdev->vdev_id == vdev_id) {
+ u_int8_t i, j;
+ /* update vdev queues group pointers */
+ for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
+ for (j = 0; j < OL_TX_MAX_GROUPS_PER_QUEUE; j++)
+ vdev->txqs[i].group_ptrs[j] = grp_ptr;
+ }
+ qdf_spin_lock_bh(&pdev->peer_ref_mutex);
+ /* Update peer queue group pointers */
+ TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
+ for (i = 0; i < OL_TX_NUM_TIDS; i++) {
+ for (j = 0;
+ j < OL_TX_MAX_GROUPS_PER_QUEUE;
+ j++)
+ peer->txqs[i].group_ptrs[j] =
+ grp_ptr;
+ }
+ }
+ qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+ break;
+ }
+ }
+}
+
+void ol_tx_txq_set_group_ptr(
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_tx_queue_group_t *grp_ptr)
+{
+ u_int8_t i;
+ for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++)
+ txq->group_ptrs[i] = grp_ptr;
+}
+
+void ol_tx_set_peer_group_ptr(
+ ol_txrx_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ u_int8_t vdev_id,
+ u_int8_t tid)
+{
+ u_int8_t i, j = 0;
+ struct ol_tx_queue_group_t *group = NULL;
+
+ for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++)
+ peer->txqs[tid].group_ptrs[i] = NULL;
+
+ for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
+ group = &pdev->txq_grps[i];
+ if (ol_tx_vdev_has_tx_queue_group(group, vdev_id)) {
+ if (tid < OL_TX_NUM_QOS_TIDS) {
+ if (ol_tx_ac_has_tx_queue_group(
+ group,
+ TXRX_TID_TO_WMM_AC(tid))) {
+ peer->txqs[tid].group_ptrs[j] = group;
+ j++;
+ }
+ } else {
+ peer->txqs[tid].group_ptrs[j] = group;
+ j++;
+ }
+ }
+ if (j >= OL_TX_MAX_GROUPS_PER_QUEUE)
+ break;
+ }
+}
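
For example, assuming the usual TID-to-AC mapping, if txq group 0 contains vdev 1 plus the VO and VI access categories, then for a peer on vdev 1 this function sets group 0 in the group_ptrs of TID 6 (VO) but leaves TID 0 (BE) without a group; a non-QoS TID (e.g. management) needs only the vdev membership to be assigned the group.
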
+
+u_int32_t ol_tx_get_max_tx_groups_supported(struct ol_txrx_pdev_t *pdev)
+{
+#ifdef HIF_SDIO
+ return OL_TX_MAX_TXQ_GROUPS;
+#else
+ return 0;
+#endif
+}
+#endif
+
/*--- End of LL tx throttle queue code ---------------------------------------*/
diff --git a/core/dp/txrx/ol_tx_queue.h b/core/dp/txrx/ol_tx_queue.h
index c78bbc2..56ff9e7 100644
--- a/core/dp/txrx/ol_tx_queue.h
+++ b/core/dp/txrx/ol_tx_queue.h
@@ -37,46 +37,405 @@
#include <qdf_types.h> /* bool */
/*--- function prototypes for optional queue log feature --------------------*/
-#if defined(ENABLE_TX_QUEUE_LOG)
+#if defined(ENABLE_TX_QUEUE_LOG) || \
+ (defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT))
+/**
+ * ol_tx_queue_log_enqueue() - log tx frames enqueued to a tx queue
+ * @pdev: physical device object
+ * @msdu_info: tx msdu meta data
+ * @frms: number of frames for which logs need to be enqueued
+ * @bytes: number of bytes
+ *
+ *
+ * Return: None
+ */
void
ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
struct ol_txrx_msdu_info_t *msdu_info,
int frms, int bytes);
+
+/**
+ * ol_tx_queue_log_dequeue() - log tx frames dequeued from a tx queue
+ * @pdev: physical device object
+ * @txq: tx queue
+ * @frms: number of frames for which logs need to be dequeued
+ * @bytes: number of bytes
+ *
+ *
+ * Return: None
+ */
void
ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
struct ol_tx_frms_queue_t *txq, int frms, int bytes);
+
+/**
+ * ol_tx_queue_log_free() - log tx frames freed from a tx queue
+ * @pdev: physical device object
+ * @txq: tx queue
+ * @tid: tid value
+ * @frms: number of frames for which logs need to be freed
+ * @bytes: number of bytes
+ *
+ *
+ * Return: None
+ */
void
ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
struct ol_tx_frms_queue_t *txq,
int tid, int frms, int bytes);
-#define OL_TX_QUEUE_LOG_ENQUEUE ol_tx_queue_log_enqueue
-#define OL_TX_QUEUE_LOG_DEQUEUE ol_tx_queue_log_dequeue
-#define OL_TX_QUEUE_LOG_FREE ol_tx_queue_log_free
#else
-#define OL_TX_QUEUE_LOG_ENQUEUE(pdev, msdu_info, frms, bytes) /* no-op */
-#define OL_TX_QUEUE_LOG_DEQUEUE(pdev, txq, frms, bytes) /* no-op */
-#define OL_TX_QUEUE_LOG_FREE(pdev, txq, tid, frms, bytes) /* no-op */
+static inline void
+ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_msdu_info_t *msdu_info,
+ int frms, int bytes)
+{
+ return;
+}
-#endif /* TXRX_DEBUG_LEVEL > 5 */
+static inline void
+ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq, int frms, int bytes)
+{
+ return;
+}
-#define ol_tx_enqueue(pdev, txq, tx_desc, tx_msdu_info) /* no-op */
-#define ol_tx_dequeue(pdev, ext_tid, txq, head, num_frames, credit, bytes) 0
-#define ol_tx_queue_free(pdev, txq, tid) /* no-op */
-#define ol_tx_queue_discard(pdev, flush, tx_descs) /* no-op */
+static inline void
+ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid, int frms, int bytes)
+{
+ return;
+}
+#endif
+
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * @brief Queue a tx frame to the tid queue.
+ *
+ * @param pdev - the data physical device sending the data
+ * (for storing the tx desc in the virtual dev's tx_target_list,
+ * and for accessing the phy dev)
+ * @param txq - which queue the tx frame gets stored in
+ * @param tx_desc - tx meta-data, including prev and next ptrs
+ * @param tx_msdu_info - characteristics of the tx frame
+ */
+void
+ol_tx_enqueue(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_tx_desc_t *tx_desc,
+ struct ol_txrx_msdu_info_t *tx_msdu_info);
+
+/**
+ * @brief - remove the specified number of frames from the head of a tx queue
+ * @details
+ * This function removes frames from the head of a tx queue,
+ * and returns them as a NULL-terminated linked list.
+ * The function will remove frames until one of the following happens:
+ * 1. The tx queue is empty
+ * 2. The specified number of frames have been removed
+ * 3. Removal of more frames would exceed the specified credit limit
+ *
+ * @param pdev - the physical device object
+ * @param txq - which tx queue to remove frames from
+ * @param head - receives the returned linked list of tx frames (descriptors)
+ * @param num_frames - maximum number of frames to remove
+ * @param[in/out] credit -
+ * input: max credit the dequeued frames can consume
+ * output: how much credit the dequeued frames consume
+ * @param[out] bytes - the sum of the sizes of the dequeued frames
+ * @return number of frames dequeued
+ */
+u_int16_t
+ol_tx_dequeue(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ ol_tx_desc_list *head,
+ u_int16_t num_frames,
+ u_int32_t *credit,
+ int *bytes);
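
A hedged sketch of a caller, showing the in/out use of the credit argument (the surrounding pdev/txq variables are assumed to exist; this is not code from this change):

    ol_tx_desc_list head;
    u_int32_t credit = 16;    /* in: max credit the dequeued frames may use */
    int bytes;
    u_int16_t frames;

    TAILQ_INIT(&head);
    frames = ol_tx_dequeue(pdev, txq, &head, 8 /* max frames */,
                           &credit, &bytes);
    /* out: credit now holds the credit the dequeued frames consumed */
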
+
+/**
+ * @brief - free all frames from the tx queue during deletion
+ * @details
+ * This function frees all frames from the tx queue.
+ * This function is called during peer or vdev deletion.
+ * This function notifies the scheduler, so the scheduler can update
+ * its state to account for the absence of the queue.
+ *
+ * @param pdev - the physical device object, which stores the txqs
+ * @param txq - which tx queue to free frames from
+ * @param tid - the extended TID that the queue belongs to
+ */
+void
+ol_tx_queue_free(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid);
+
+/**
+ * @brief - discard pending tx frames from the tx queue
+ * @details
+ * This function is called when there are too many queues in the tx scheduler,
+ * or when all pending tx queues in the tx scheduler need to be flushed.
+ *
+ * @param pdev - the physical device object, which stores the txqs
+ * @param flush_all - flush all pending tx queues if set to true
+ * @param tx_descs - list of the discarded tx descriptors, returned by this
+ *      function
+ */
+void
+ol_tx_queue_discard(
+ struct ol_txrx_pdev_t *pdev,
+ bool flush_all,
+ ol_tx_desc_list *tx_descs);
+
+#else
+
+static inline void
+ol_tx_enqueue(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_tx_desc_t *tx_desc,
+ struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+ return;
+}
+
+static inline u_int16_t
+ol_tx_dequeue(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ ol_tx_desc_list *head,
+ u_int16_t num_frames,
+ u_int32_t *credit,
+ int *bytes)
+{
+ return 0;
+}
+
+static inline void
+ol_tx_queue_free(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid)
+{
+ return;
+}
+
+static inline void
+ol_tx_queue_discard(
+ struct ol_txrx_pdev_t *pdev,
+ bool flush_all,
+ ol_tx_desc_list *tx_descs)
+{
+ return;
+}
+#endif /* defined(CONFIG_HL_SUPPORT) */
+
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+
+void
+ol_txrx_peer_bal_add_limit_peer(
+ struct ol_txrx_pdev_t *pdev,
+ u_int16_t peer_id,
+ u_int16_t peer_limit);
+
+void
+ol_txrx_peer_bal_remove_limit_peer(
+ struct ol_txrx_pdev_t *pdev,
+ u_int16_t peer_id);
+
+/**
+ * ol_txrx_peer_pause_but_no_mgmt_q() - suspend/pause all txqs except
+ * management queue for a given peer
+ * @peer: peer device object
+ *
+ * Return: None
+ */
+void
+ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer);
+
+/**
+ * ol_txrx_peer_unpause_but_no_mgmt_q() - unpause all txqs except management
+ * queue for a given peer
+ * @peer: peer device object
+ *
+ * Return: None
+ */
+void
+ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer);
+
+/**
+ * ol_tx_bad_peer_dequeue_check() - retrieve the send limit
+ * of the tx queue category
+ * @txq: tx queue of the head of the category list
+ * @max_frames: send limit of the txq category
+ * @tx_limit_flag: set to true if the tx limit is reached
+ *
+ * Return: send limit
+ */
+u_int16_t
+ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
+ u_int16_t max_frames,
+ u_int16_t *tx_limit_flag);
+
+/**
+ * ol_tx_bad_peer_update_tx_limit() - update the send limit of the
+ * tx queue category
+ * @pdev: the physical device object
+ * @txq: tx queue of the head of the category list
+ * @frames: number of frames that have been dequeued
+ * @tx_limit_flag: tx limit reached flag
+ *
+ * Return: None
+ */
+void
+ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ u_int16_t frames,
+ u_int16_t tx_limit_flag);
+
+/**
+ * ol_txrx_set_txq_peer() - set the tx queue's peer
+ * @txq: tx queue for a given tid
+ * @peer: the peer device object
+ *
+ * Return: None
+ */
+void
+ol_txrx_set_txq_peer(
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_txrx_peer_t *peer);
+
+/**
+ * @brief - initialize the peer balance context
+ * @param pdev - the physical device object, which stores the txqs
+ */
+void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev);
+
+/**
+ * @brief - deinitialize the peer balance context
+ * @param pdev - the physical device object, which stores the txqs
+ */
+void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev);
+
+#else
+
+static inline void ol_txrx_peer_bal_add_limit_peer(
+ struct ol_txrx_pdev_t *pdev,
+ u_int16_t peer_id,
+ u_int16_t peer_limit)
+{
+ return;
+}
+
+static inline void ol_txrx_peer_bal_remove_limit_peer(
+ struct ol_txrx_pdev_t *pdev,
+ u_int16_t peer_id)
+{
+ return;
+}
+
+static inline void ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer)
+{
+ return;
+}
+
+static inline void ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer)
+{
+ return;
+}
+
+static inline u_int16_t
+ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
+ u_int16_t max_frames,
+ u_int16_t *tx_limit_flag)
+{
+ /* just return max_frames */
+ return max_frames;
+}
+
+static inline void
+ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ u_int16_t frames,
+ u_int16_t tx_limit_flag)
+{
+ return;
+}
+
+static inline void
+ol_txrx_set_txq_peer(
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_txrx_peer_t *peer)
+{
+ return;
+}
+
+static inline void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+static inline void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+#endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
+
+#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
+
+/**
+ * ol_tx_queue_log_sched() - start logging of tx queues for HL
+ * @pdev: physical device object
+ * @credit: number of credits
+ * @num_active_tids: number of active tids for which logging needs to be done
+ * @active_bitmap: bitmap of active categories
+ * @data: buffer
+ *
+ * Return: None
+ */
void
ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
int credit,
int *num_active_tids,
uint32_t **active_bitmap, uint8_t **data);
+#else
-#define OL_TX_QUEUE_LOG_SCHED( \
- pdev, credit, num_active_tids, active_bitmap, data)
+static inline void
+ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
+ int credit,
+ int *num_active_tids,
+ uint32_t **active_bitmap, uint8_t **data)
+{
+ return;
+}
+#endif /* defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING) */
-#define ol_tx_queues_display(pdev) /* no-op */
+#if defined(CONFIG_HL_SUPPORT) && TXRX_DEBUG_LEVEL > 5
+/**
+ * @brief - show current state of all tx queues
+ * @param pdev - the physical device object, which stores the txqs
+ */
+void
+ol_tx_queues_display(struct ol_txrx_pdev_t *pdev);
+
+#else
+
+static inline void
+ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+#endif
#define ol_tx_queue_decs_reinit(peer, peer_id) /* no-op */
@@ -89,4 +448,134 @@
#else
#define ol_tx_throttle_init(pdev) /*no op */
#endif
+
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+static inline bool
+ol_tx_is_txq_last_serviced_queue(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq)
+{
+ return txq == pdev->tx_sched.last_used_txq;
+}
+
+/**
+ * ol_tx_txq_group_credit_limit() - check for credit limit of a given tx queue
+ * @pdev: physical device object
+ * @txq: tx queue for which credit limit needs be to checked
+ * @credit: number of credits of the selected category
+ *
+ * Return: updated credits
+ */
+u_int32_t ol_tx_txq_group_credit_limit(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ u_int32_t credit);
+
+/**
+ * ol_tx_txq_group_credit_update() - update group credits of the
+ * selected catoegory
+ * @pdev: physical device object
+ * @txq: tx queue for which credit needs to be updated
+ * @credit: number of credits by which selected category needs to be updated
+ * @absolute: when set, treat credit as an absolute value instead of a delta
+ *
+ * Return: None
+ */
+void ol_tx_txq_group_credit_update(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int32_t credit,
+ u_int8_t absolute);
+
+/**
+ * ol_tx_set_vdev_group_ptr() - update vdev queues group pointer
+ * @pdev: physical device object
+ * @vdev_id: vdev id for which the group pointer needs to be updated
+ * @grp_ptr: pointer to ol tx queue group which needs to be set for vdev queues
+ *
+ * Return: None
+ */
+void
+ol_tx_set_vdev_group_ptr(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t vdev_id,
+ struct ol_tx_queue_group_t *grp_ptr);
+
+/**
+ * ol_tx_txq_set_group_ptr() - update tx queue group pointer
+ * @txq: tx queue for which the group pointer needs to be updated
+ * @grp_ptr: pointer to ol tx queue group which needs to be
+ * set for given tx queue
+ *
+ *
+ * Return: None
+ */
+void
+ol_tx_txq_set_group_ptr(
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_tx_queue_group_t *grp_ptr);
+
+/**
+ * ol_tx_set_peer_group_ptr() - update peer tx queues group pointer
+ * for a given tid
+ * @pdev: physical device object
+ * @peer: peer device object
+ * @vdev_id: vdev id
+ * @tid: tid for which the group pointer needs to be updated
+ *
+ *
+ * Return: None
+ */
+void
+ol_tx_set_peer_group_ptr(
+ ol_txrx_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ u_int8_t vdev_id,
+ u_int8_t tid);
+#else
+
+static inline bool
+ol_tx_is_txq_last_serviced_queue(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq)
+{
+ return 0;
+}
+
+static inline
+u_int32_t ol_tx_txq_group_credit_limit(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ u_int32_t credit)
+{
+ return credit;
+}
+
+static inline void ol_tx_txq_group_credit_update(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int32_t credit,
+ u_int8_t absolute)
+{
+ return;
+}
+
+static inline void
+ol_tx_txq_set_group_ptr(
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_tx_queue_group_t *grp_ptr)
+{
+ return;
+}
+
+static inline void
+ol_tx_set_peer_group_ptr(
+ ol_txrx_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ u_int8_t vdev_id,
+ u_int8_t tid)
+{
+ return;
+}
+#endif
+
#endif /* _OL_TX_QUEUE__H_ */
diff --git a/core/dp/txrx/ol_tx_sched.c b/core/dp/txrx/ol_tx_sched.c
new file mode 100644
index 0000000..bafd83c
--- /dev/null
+++ b/core/dp/txrx/ol_tx_sched.c
@@ -0,0 +1,1482 @@
+/*
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
+#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
+#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
+#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
+#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
+#include <ol_txrx_types.h> /* pdev stats, etc. */
+#include <ol_tx_desc.h> /* ol_tx_desc */
+#include <ol_tx_send.h> /* ol_tx_send */
+#include <ol_tx_sched.h> /* OL_TX_SCHED, etc. */
+#include <ol_tx_queue.h>
+#include <ol_txrx.h>
+#include <qdf_types.h>
+#include <qdf_mem.h> /* qdf_os_mem_alloc_consistent et al */
+
+#if defined(CONFIG_HL_SUPPORT)
+
+#if defined(DEBUG_HL_LOGGING)
+static void
+ol_tx_sched_log(struct ol_txrx_pdev_t *pdev);
+
+#else
+static void
+ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+#endif /* defined(DEBUG_HL_LOGGING) */
+
+#if DEBUG_HTT_CREDIT
+#define OL_TX_DISPATCH_LOG_CREDIT() \
+ do { \
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, \
+ "TX %d bytes\n", qdf_nbuf_len(msdu)); \
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, \
+ " <HTT> Decrease credit %d - 1 = %d, len:%d.\n", \
+ qdf_atomic_read(&pdev->target_tx_credit), \
+ qdf_atomic_read(&pdev->target_tx_credit) - 1, \
+ qdf_nbuf_len(msdu)); \
+ } while (0)
+#else
+#define OL_TX_DISPATCH_LOG_CREDIT()
+#endif
+
+/*--- generic definitions used by the scheduler framework for all algs ---*/
+
+struct ol_tx_sched_ctx {
+ ol_tx_desc_list head;
+ int frms;
+};
+
+typedef TAILQ_HEAD(ol_tx_frms_queue_list_s, ol_tx_frms_queue_t)
+ ol_tx_frms_queue_list;
+
+#define OL_A_MAX(_x, _y) ((_x) > (_y) ? (_x) : (_y))
+
+#define OL_A_MIN(_x, _y) ((_x) < (_y) ? (_x) : (_y))
+
+ /*--- scheduler algorithm selection ---*/
+
+ /*--- scheduler options -----------------------------------------------
+ * 1. Round-robin scheduler:
+ * Select the TID that is at the head of the list of active TIDs.
+ * Select the head tx queue for this TID.
+ * Move the tx queue to the back of the list of tx queues for
+ * this TID.
+ * Move the TID to the back of the list of active TIDs.
+ * Send as many frames from the tx queue as credit allows.
+ * 2. Weighted-round-robin advanced scheduler:
+ * Keep an ordered list of which TID gets selected next.
+ * Use a weighted-round-robin scheme to determine when to promote
+ * a TID within this list.
+ * If a TID at the head of the list is inactive, leave it at the
+ * head, but check the next TIDs.
+ * If the credit available is less than the credit threshold for the
+ * next active TID, don't send anything, and leave the TID at the
+ * head of the list.
+ * After a TID is selected, move it to the back of the list.
+ * Select the head tx queue for this TID.
+ * Move the tx queue to the back of the list of tx queues for this
+ * TID.
+ * Send no more frames than the limit specified for the TID.
+ */
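
For instance, with the default skip weights configured later in this file (VO = 1, VI = 3, BE = 10), VO is eligible every time it reaches the head of the order list, VI only every third time, and BE every tenth time; a skipped category is rotated to the back with its counter retained. A minimal sketch of the eligibility test (category follows the structures defined below):

    if (++category->state.wrr_count < category->specs.wrr_skip_weight) {
        /* not this category's turn yet - rotate it to the back */
    } else {
        /* category selected - reset its counter for the next round */
        category->state.wrr_count = 0;
    }
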
+#define OL_TX_SCHED_RR 1
+#define OL_TX_SCHED_WRR_ADV 2
+
+#ifndef OL_TX_SCHED
+ /*#define OL_TX_SCHED OL_TX_SCHED_RR*/
+#define OL_TX_SCHED OL_TX_SCHED_WRR_ADV /* default */
+#endif
+
+
+#if OL_TX_SCHED == OL_TX_SCHED_RR
+
+#define ol_tx_sched_rr_t ol_tx_sched_t
+
+#define OL_TX_SCHED_NUM_CATEGORIES (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
+
+#define ol_tx_sched_init ol_tx_sched_init_rr
+#define ol_tx_sched_select_init(pdev) /* no-op */
+#define ol_tx_sched_select_batch ol_tx_sched_select_batch_rr
+#define ol_tx_sched_txq_enqueue ol_tx_sched_txq_enqueue_rr
+#define ol_tx_sched_txq_deactivate ol_tx_sched_txq_deactivate_rr
+#define ol_tx_sched_category_tx_queues ol_tx_sched_category_tx_queues_rr
+#define ol_tx_sched_txq_discard ol_tx_sched_txq_discard_rr
+#define ol_tx_sched_category_info ol_tx_sched_category_info_rr
+#define ol_tx_sched_discard_select_category \
+ ol_tx_sched_discard_select_category_rr
+
+#elif OL_TX_SCHED == OL_TX_SCHED_WRR_ADV
+
+#define ol_tx_sched_wrr_adv_t ol_tx_sched_t
+
+#define OL_TX_SCHED_NUM_CATEGORIES OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES
+
+#define ol_tx_sched_init ol_tx_sched_init_wrr_adv
+#define ol_tx_sched_select_init(pdev) \
+ do { \
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock); \
+ ol_tx_sched_select_init_wrr_adv(pdev); \
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock); \
+ } while (0)
+#define ol_tx_sched_select_batch ol_tx_sched_select_batch_wrr_adv
+#define ol_tx_sched_txq_enqueue ol_tx_sched_txq_enqueue_wrr_adv
+#define ol_tx_sched_txq_deactivate ol_tx_sched_txq_deactivate_wrr_adv
+#define ol_tx_sched_category_tx_queues ol_tx_sched_category_tx_queues_wrr_adv
+#define ol_tx_sched_txq_discard ol_tx_sched_txq_discard_wrr_adv
+#define ol_tx_sched_category_info ol_tx_sched_category_info_wrr_adv
+#define ol_tx_sched_discard_select_category \
+ ol_tx_sched_discard_select_category_wrr_adv
+
+#else
+
+#error Unknown OL TX SCHED specification
+
+#endif /* OL_TX_SCHED */
+
+ /*--- round-robin scheduler ----------------------------------------*/
+#if OL_TX_SCHED == OL_TX_SCHED_RR
+
+ /*--- definitions ---*/
+
+ struct ol_tx_active_queues_in_tid_t {
+ /* list_elem is used to queue up into up level queues*/
+ TAILQ_ENTRY(ol_tx_active_queues_in_tid_t) list_elem;
+ u_int32_t frms;
+ u_int32_t bytes;
+ ol_tx_frms_queue_list head;
+ bool active;
+ int tid;
+ };
+
+ struct ol_tx_sched_rr_t {
+ struct ol_tx_active_queues_in_tid_t
+ tx_active_queues_in_tid_array[OL_TX_NUM_TIDS
+ + OL_TX_VDEV_NUM_QUEUES];
+ TAILQ_HEAD(ol_tx_active_tids_s, ol_tx_active_queues_in_tid_t)
+ tx_active_tids_list;
+ u_int8_t discard_weights[OL_TX_NUM_TIDS
+ + OL_TX_VDEV_NUM_QUEUES];
+ };
+
+#define TX_SCH_MAX_CREDIT_FOR_THIS_TID(tidq) 16
+
+/*--- functions ---*/
+
+/*
+ * The scheduler sync spinlock has been acquired outside this function,
+ * so there is no need to worry about mutex within this function.
+ */
+static int
+ol_tx_sched_select_batch_rr(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_sched_ctx *sctx,
+ u_int32_t credit)
+{
+ struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_active_queues_in_tid_t *txq_queue;
+ struct ol_tx_frms_queue_t *next_tq;
+ u_int16_t frames, used_credits, tx_limit, tx_limit_flag = 0;
+ int bytes;
+
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+
+ if (TAILQ_EMPTY(&scheduler->tx_active_tids_list))
+ return 0;
+
+ txq_queue = TAILQ_FIRST(&scheduler->tx_active_tids_list);
+
+ TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue, list_elem);
+ txq_queue->active = false;
+
+ next_tq = TAILQ_FIRST(&txq_queue->head);
+ TAILQ_REMOVE(&txq_queue->head, next_tq, list_elem);
+
+ credit = OL_A_MIN(credit, TX_SCH_MAX_CREDIT_FOR_THIS_TID(next_tq));
+ frames = next_tq->frms; /* download as many frames as credit allows */
+ tx_limit = ol_tx_bad_peer_dequeue_check(next_tq,
+ frames,
+ &tx_limit_flag);
+ frames = ol_tx_dequeue(
+ pdev, next_tq, &sctx->head, tx_limit, &credit, &bytes);
+ ol_tx_bad_peer_update_tx_limit(pdev, next_tq, frames, tx_limit_flag);
+
+ used_credits = credit;
+ txq_queue->frms -= frames;
+ txq_queue->bytes -= bytes;
+
+ if (next_tq->frms > 0) {
+ TAILQ_INSERT_TAIL(&txq_queue->head, next_tq, list_elem);
+ TAILQ_INSERT_TAIL(
+ &scheduler->tx_active_tids_list,
+ txq_queue, list_elem);
+ txq_queue->active = true;
+ } else if (!TAILQ_EMPTY(&txq_queue->head)) {
+ /*
+ * This tx queue is empty, but there's another tx queue for the
+ * same TID that is not empty.
+ * Thus, the TID as a whole is active.
+ */
+ TAILQ_INSERT_TAIL(
+ &scheduler->tx_active_tids_list,
+ txq_queue, list_elem);
+ txq_queue->active = true;
+ }
+ sctx->frms += frames;
+
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+ return used_credits;
+}
+
+static inline void
+ol_tx_sched_txq_enqueue_rr(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid,
+ int frms,
+ int bytes)
+{
+ struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_active_queues_in_tid_t *txq_queue;
+
+ txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
+ if (txq->flag != ol_tx_queue_active)
+ TAILQ_INSERT_TAIL(&txq_queue->head, txq, list_elem);
+
+ txq_queue->frms += frms;
+ txq_queue->bytes += bytes;
+
+ if (!txq_queue->active) {
+ TAILQ_INSERT_TAIL(
+ &scheduler->tx_active_tids_list,
+ txq_queue, list_elem);
+ txq_queue->active = true;
+ }
+}
+
+static inline void
+ol_tx_sched_txq_deactivate_rr(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid)
+{
+ struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_active_queues_in_tid_t *txq_queue;
+
+ txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
+ txq_queue->frms -= txq->frms;
+ txq_queue->bytes -= txq->bytes;
+
+ TAILQ_REMOVE(&txq_queue->head, txq, list_elem);
+ /*if (txq_queue->frms == 0 && txq_queue->active) {*/
+ if (TAILQ_EMPTY(&txq_queue->head) && txq_queue->active) {
+ TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue,
+ list_elem);
+ txq_queue->active = false;
+ }
+}
+
+ol_tx_frms_queue_list *
+ol_tx_sched_category_tx_queues_rr(struct ol_txrx_pdev_t *pdev, int tid)
+{
+ struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_active_queues_in_tid_t *txq_queue;
+
+ txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
+ return &txq_queue->head;
+}
+
+int
+ol_tx_sched_discard_select_category_rr(struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_tx_sched_rr_t *scheduler;
+ u_int8_t i, tid = 0;
+ int max_score = 0;
+
+ scheduler = pdev->tx_sched.scheduler;
+ /*
+ * Choose which TID's tx frames to drop next based on two factors:
+ * 1. Which TID has the most tx frames present
+ * 2. The TID's priority (high-priority TIDs have a low discard_weight)
+ */
+ for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
+ int score;
+ score =
+ scheduler->tx_active_queues_in_tid_array[i].frms *
+ scheduler->discard_weights[i];
+ if (max_score == 0 || score > max_score) {
+ max_score = score;
+ tid = i;
+ }
+ }
+ return tid;
+}
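
A quick numeric illustration of the scoring (the same scheme is used by the WRR variant below): 100 queued voice frames (discard weight 1) score 100, while 40 best-effort frames (weight 8) score 320, so the best-effort TID is discarded from first even though it holds fewer frames.
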
+
+void
+ol_tx_sched_txq_discard_rr(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid, int frames, int bytes)
+{
+ struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_active_queues_in_tid_t *txq_queue;
+
+ txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
+
+ if (0 == txq->frms)
+ TAILQ_REMOVE(&txq_queue->head, txq, list_elem);
+
+ txq_queue->frms -= frames;
+ txq_queue->bytes -= bytes;
+ if (txq_queue->active == true && txq_queue->frms == 0) {
+ TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue,
+ list_elem);
+ txq_queue->active = false;
+ }
+}
+
+void
+ol_tx_sched_category_info_rr(
+ struct ol_txrx_pdev_t *pdev,
+ int cat, int *active,
+ int *frms, int *bytes)
+{
+ struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_active_queues_in_tid_t *txq_queue;
+
+ txq_queue = &scheduler->tx_active_queues_in_tid_array[cat];
+
+ *active = txq_queue->active;
+ *frms = txq_queue->frms;
+ *bytes = txq_queue->bytes;
+}
+
+enum {
+ ol_tx_sched_discard_weight_voice = 1,
+ ol_tx_sched_discard_weight_video = 4,
+ ol_tx_sched_discard_weight_ucast_default = 8,
+ ol_tx_sched_discard_weight_mgmt_non_qos = 1, /* 0? */
+ ol_tx_sched_discard_weight_mcast = 1, /* 0? also for probe & assoc */
+};
+
+void *
+ol_tx_sched_init_rr(
+ struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_tx_sched_rr_t *scheduler;
+ int i;
+
+ scheduler = qdf_mem_malloc(sizeof(struct ol_tx_sched_rr_t));
+ if (scheduler == NULL)
+ return scheduler;
+
+ for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
+ scheduler->tx_active_queues_in_tid_array[i].tid = i;
+ TAILQ_INIT(&scheduler->tx_active_queues_in_tid_array[i].head);
+ scheduler->tx_active_queues_in_tid_array[i].active = 0;
+ scheduler->tx_active_queues_in_tid_array[i].frms = 0;
+ scheduler->tx_active_queues_in_tid_array[i].bytes = 0;
+ }
+ for (i = 0; i < OL_TX_NUM_TIDS; i++) {
+ scheduler->tx_active_queues_in_tid_array[i].tid = i;
+ if (i < OL_TX_NON_QOS_TID) {
+ int ac = TXRX_TID_TO_WMM_AC(i);
+ switch (ac) {
+ case TXRX_WMM_AC_VO:
+ scheduler->discard_weights[i] =
+ ol_tx_sched_discard_weight_voice;
+ break;
+ case TXRX_WMM_AC_VI:
+ scheduler->discard_weights[i] =
+ ol_tx_sched_discard_weight_video;
+ break;
+ default:
+ scheduler->discard_weights[i] =
+ ol_tx_sched_discard_weight_ucast_default;
+ break;
+ }
+ } else {
+ scheduler->discard_weights[i] =
+ ol_tx_sched_discard_weight_mgmt_non_qos;
+ }
+ }
+ for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
+ int j = i + OL_TX_NUM_TIDS;
+ scheduler->tx_active_queues_in_tid_array[j].tid =
+ OL_TX_NUM_TIDS - 1;
+ scheduler->discard_weights[j] =
+ ol_tx_sched_discard_weight_mcast;
+ }
+ TAILQ_INIT(&scheduler->tx_active_tids_list);
+
+ return scheduler;
+}
+
+void
+ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+ struct ol_tx_wmm_param_t wmm_param)
+{
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
+ "Dummy function when OL_TX_SCHED_RR is enabled\n");
+}
+
+#endif /* OL_TX_SCHED == OL_TX_SCHED_RR */
+
+/*--- advanced scheduler ----------------------------------------------------*/
+#if OL_TX_SCHED == OL_TX_SCHED_WRR_ADV
+
+/*--- definitions ---*/
+
+struct ol_tx_sched_wrr_adv_category_info_t {
+ struct {
+ int wrr_skip_weight;
+ u_int32_t credit_threshold;
+ u_int16_t send_limit;
+ int credit_reserve;
+ int discard_weight;
+ } specs;
+ struct {
+ int wrr_count;
+ int frms;
+ int bytes;
+ ol_tx_frms_queue_list head;
+ bool active;
+ } state;
+#ifdef DEBUG_HL_LOGGING
+ struct {
+ char *cat_name;
+ unsigned int queued;
+ unsigned int dispatched;
+ unsigned int discard;
+ } stat;
+#endif
+};
+
+#define OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(cat, \
+ wrr_skip_weight, \
+ credit_threshold, \
+ send_limit, \
+ credit_reserve, \
+ discard_weights) \
+enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _WRR_SKIP_WEIGHT = \
+ (wrr_skip_weight) }; \
+enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_THRESHOLD = \
+ (credit_threshold) }; \
+enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _SEND_LIMIT = \
+ (send_limit) }; \
+enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_RESERVE = \
+ (credit_reserve) }; \
+enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _DISCARD_WEIGHT = \
+ (discard_weights) }
+
+/* Rome:
+ * For high-volume traffic flows (VI, BE, BK), use a credit threshold
+ * roughly equal to a large A-MPDU (occupying half the target memory
+ * available for holding tx frames) to download AMPDU-sized batches
+ * of traffic.
+ * For high-priority, low-volume traffic flows (VO and mgmt), use no
+ * credit threshold, to minimize download latency.
+ */
+/* WRR send
+ * skip credit limit credit disc
+ * wts thresh (frms) reserv wts
+ */
+#ifdef HIF_SDIO
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO, 1, 17, 24, 0, 1);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VI, 3, 17, 16, 1, 4);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BE, 10, 17, 16, 1, 8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BK, 12, 6, 6, 1, 8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(NON_QOS_DATA, 12, 6, 4, 1, 8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(UCAST_MGMT, 1, 1, 4, 0, 1);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_DATA, 10, 17, 4, 1, 4);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT, 1, 1, 4, 0, 1);
+#else
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO, 1, 16, 24, 0, 1);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VI, 3, 16, 16, 1, 4);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BE, 10, 12, 12, 1, 8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BK, 12, 6, 6, 1, 8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(NON_QOS_DATA, 12, 6, 4, 1, 8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(UCAST_MGMT, 1, 1, 4, 0, 1);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_DATA, 10, 16, 4, 1, 4);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT, 1, 1, 4, 0, 1);
+#endif
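
For reference, each OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC() line simply generates five enum constants; e.g. the non-SDIO VO entry above expands to:

    enum { OL_TX_SCHED_WRR_ADV_VO_WRR_SKIP_WEIGHT = (1) };
    enum { OL_TX_SCHED_WRR_ADV_VO_CREDIT_THRESHOLD = (16) };
    enum { OL_TX_SCHED_WRR_ADV_VO_SEND_LIMIT = (24) };
    enum { OL_TX_SCHED_WRR_ADV_VO_CREDIT_RESERVE = (0) };
    enum { OL_TX_SCHED_WRR_ADV_VO_DISCARD_WEIGHT = (1) };
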
+
+#ifdef DEBUG_HL_LOGGING
+
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler) \
+ do { \
+ scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+ .stat.queued = 0; \
+ scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+ .stat.discard = 0; \
+ scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+ .stat.dispatched = 0; \
+ scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+ .stat.cat_name = #category; \
+ } while (0)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms) \
+ do { \
+ category->stat.queued += frms; \
+ } while (0)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms) \
+ do { \
+ category->stat.discard += frms; \
+ } while (0)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms) \
+ do { \
+ category->stat.dispatched += frms; \
+ } while (0)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler) \
+ ol_tx_sched_wrr_adv_cat_stat_dump(scheduler)
+#define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler) \
+ ol_tx_sched_wrr_adv_cat_cur_state_dump(scheduler)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(scheduler) \
+ ol_tx_sched_wrr_adv_cat_stat_clear(scheduler)
+
+#else /* DEBUG_HL_LOGGING */
+
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler)
+#define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(scheduler)
+
+#endif /* DEBUG_HL_LOGGING */
+
+#define OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(category, scheduler) \
+ do { \
+ scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+ .specs.wrr_skip_weight = \
+ OL_TX_SCHED_WRR_ADV_ ## category ## _WRR_SKIP_WEIGHT; \
+ scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+ .specs.credit_threshold = \
+ OL_TX_SCHED_WRR_ADV_ ## category ## _CREDIT_THRESHOLD; \
+ scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+ .specs.send_limit = \
+ OL_TX_SCHED_WRR_ADV_ ## category ## _SEND_LIMIT; \
+ scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+ .specs.credit_reserve = \
+ OL_TX_SCHED_WRR_ADV_ ## category ## _CREDIT_RESERVE; \
+ scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+ .specs.discard_weight = \
+ OL_TX_SCHED_WRR_ADV_ ## category ## _DISCARD_WEIGHT; \
+ OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler); \
+ } while (0)
+
+struct ol_tx_sched_wrr_adv_t {
+ int order[OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES];
+ int index;
+ struct ol_tx_sched_wrr_adv_category_info_t
+ categories[OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES];
+};
+
+#define OL_TX_AIFS_DEFAULT_VO 2
+#define OL_TX_AIFS_DEFAULT_VI 2
+#define OL_TX_AIFS_DEFAULT_BE 3
+#define OL_TX_AIFS_DEFAULT_BK 7
+#define OL_TX_CW_MIN_DEFAULT_VO 3
+#define OL_TX_CW_MIN_DEFAULT_VI 7
+#define OL_TX_CW_MIN_DEFAULT_BE 15
+#define OL_TX_CW_MIN_DEFAULT_BK 15
+
+/*--- functions ---*/
+
+#ifdef DEBUG_HL_LOGGING
+static void ol_tx_sched_wrr_adv_cat_stat_dump(
+ struct ol_tx_sched_wrr_adv_t *scheduler)
+{
+ int i;
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "Scheduler Stats:");
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "====category(CRR,CRT,WSW): Queued Discard Dequeued frms wrr===");
+ for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%12s(%2d, %2d, %2d): %6d %7d %8d %4d %3d",
+ scheduler->categories[i].stat.cat_name,
+ scheduler->categories[i].specs.credit_reserve,
+ scheduler->categories[i].specs.credit_threshold,
+ scheduler->categories[i].specs.wrr_skip_weight,
+ scheduler->categories[i].stat.queued,
+ scheduler->categories[i].stat.discard,
+ scheduler->categories[i].stat.dispatched,
+ scheduler->categories[i].state.frms,
+ scheduler->categories[i].state.wrr_count);
+ }
+}
+
+static void ol_tx_sched_wrr_adv_cat_cur_state_dump(
+ struct ol_tx_sched_wrr_adv_t *scheduler)
+{
+ int i;
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "Scheduler State Snapshot:");
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "====category(CRR,CRT,WSW): IS_Active Pend_Frames Pend_bytes wrr===");
+ for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%12s(%2d, %2d, %2d): %9d %11d %10d %3d",
+ scheduler->categories[i].stat.cat_name,
+ scheduler->categories[i].specs.credit_reserve,
+ scheduler->categories[i].specs.credit_threshold,
+ scheduler->categories[i].specs.wrr_skip_weight,
+ scheduler->categories[i].state.active,
+ scheduler->categories[i].state.frms,
+ scheduler->categories[i].state.bytes,
+ scheduler->categories[i].state.wrr_count);
+ }
+}
+
+static void ol_tx_sched_wrr_adv_cat_stat_clear(
+ struct ol_tx_sched_wrr_adv_t *scheduler)
+{
+ int i;
+ for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
+ scheduler->categories[i].stat.queued = 0;
+ scheduler->categories[i].stat.discard = 0;
+ scheduler->categories[i].stat.dispatched = 0;
+ }
+}
+
+#endif
+
+static void
+ol_tx_sched_select_init_wrr_adv(struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+ /* start selection from the front of the ordered list */
+ scheduler->index = 0;
+ pdev->tx_sched.last_used_txq = NULL;
+}
+
+static void
+ol_tx_sched_wrr_adv_rotate_order_list_tail(
+ struct ol_tx_sched_wrr_adv_t *scheduler, int idx)
+{
+ int value;
+ /* remember the value of the specified element */
+ value = scheduler->order[idx];
+ /* shift all further elements up one space */
+ for (; idx < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES-1; idx++)
+ scheduler->order[idx] = scheduler->order[idx + 1];
+
+ /* put the specified element at the end */
+ scheduler->order[idx] = value;
+}
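
For instance, with order[] = {VO, VI, BE, BK} and idx = 1, the loop shifts BE and BK up one slot and re-inserts VI at the tail, yielding {VO, BE, BK, VI}.
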
+
+static void
+ol_tx_sched_wrr_adv_credit_sanity_check(struct ol_txrx_pdev_t *pdev,
+ u_int32_t credit)
+{
+ struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+ int i;
+ int okay = 1;
+
+ for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
+ if (scheduler->categories[i].specs.credit_threshold > credit) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "*** Config error: credit (%d) not enough to support category %d threshold (%d)\n",
+ credit, i,
+ scheduler->categories[i].specs.
+ credit_threshold);
+ okay = 0;
+ }
+ }
+ qdf_assert(okay);
+}
+
+/*
+ * The scheduler sync spinlock has been acquired outside this function,
+ * so there is no need to worry about mutex within this function.
+ */
+static int
+ol_tx_sched_select_batch_wrr_adv(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_sched_ctx *sctx,
+ u_int32_t credit)
+{
+ static int first = 1;
+ int category_index = 0;
+ struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_frms_queue_t *txq;
+ int index;
+ struct ol_tx_sched_wrr_adv_category_info_t *category = NULL;
+ int frames, bytes, used_credits = 0, tx_limit;
+ u_int16_t tx_limit_flag;
+
+ /*
+ * Just for good measure, do a sanity check that the initial credit
+ * is enough to cover every category's credit threshold.
+ */
+ if (first) {
+ first = 0;
+ ol_tx_sched_wrr_adv_credit_sanity_check(pdev, credit);
+ }
+
+ /* choose the traffic category from the ordered list */
+ index = scheduler->index;
+ while (index < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES) {
+ category_index = scheduler->order[index];
+ category = &scheduler->categories[category_index];
+ if (!category->state.active) {
+ /* move on to the next category */
+ index++;
+ continue;
+ }
+ if (++category->state.wrr_count <
+ category->specs.wrr_skip_weight) {
+ /* skip this category (move it to the back) */
+ ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler,
+ index);
+ /* try again (iterate) on the new element
+ * that was moved up */
+ continue;
+ }
+ /* found the first active category whose WRR turn is present */
+ break;
+ }
+ if (index >= OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES) {
+ /* no categories are active */
+ return 0;
+ }
+
+ /* is there enough credit for the selected category? */
+ if (credit < category->specs.credit_threshold) {
+ /*
+ * Can't send yet - wait until more credit becomes available.
+ * In the meantime, restore the WRR counter (since we didn't
+ * service this category after all).
+ */
+ category->state.wrr_count = category->state.wrr_count - 1;
+ return 0;
+ }
+ /* enough credit is available - go ahead and send some frames */
+ /*
+ * This category was serviced - reset the WRR counter, and move this
+ * category to the back of the order list.
+ */
+ category->state.wrr_count = 0;
+ ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler, index);
+ /*
+ * With this category moved to the back, if there's still any credit
+ * left, set up the next invocation of this function to start from
+ * where this one left off, by looking at the category that just got
+ * shifted forward into the position the service category was
+ * occupying.
+ */
+ scheduler->index = index;
+
+ /*
+ * Take the tx queue from the head of the category list.
+ */
+ txq = TAILQ_FIRST(&category->state.head);
+
+ if (txq) {
+ TAILQ_REMOVE(&category->state.head, txq, list_elem);
+ credit = ol_tx_txq_group_credit_limit(pdev, txq, credit);
+ if (credit > category->specs.credit_reserve) {
+ credit -= category->specs.credit_reserve;
+ /*
+ * this tx queue will download some frames,
+ * so update last_used_txq
+ */
+ pdev->tx_sched.last_used_txq = txq;
+
+ tx_limit = ol_tx_bad_peer_dequeue_check(txq,
+ category->specs.send_limit,
+ &tx_limit_flag);
+ frames = ol_tx_dequeue(
+ pdev, txq, &sctx->head,
+ tx_limit, &credit, &bytes);
+ ol_tx_bad_peer_update_tx_limit(pdev, txq,
+ frames,
+ tx_limit_flag);
+
+ OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category,
+ frames);
+ used_credits = credit;
+ category->state.frms -= frames;
+ category->state.bytes -= bytes;
+ if (txq->frms > 0) {
+ TAILQ_INSERT_TAIL(&category->state.head,
+ txq, list_elem);
+ } else {
+ if (category->state.frms == 0)
+ category->state.active = 0;
+ }
+ sctx->frms += frames;
+ ol_tx_txq_group_credit_update(pdev, txq, -credit, 0);
+ } else {
+ if (ol_tx_is_txq_last_serviced_queue(pdev, txq)) {
+ /*
+ * The scheduler has looked at all the active
+ * tx queues but none were able to download any
+ * of their tx frames.
+ * Nothing is changed, so if none were able
+ * to download before,
+ * they won't be able to download now.
+ * Return that no credit has been used, which
+ * will cause the scheduler to stop.
+ */
+ TAILQ_INSERT_HEAD(&category->state.head, txq,
+ list_elem);
+ return 0;
+ } else {
+ TAILQ_INSERT_TAIL(&category->state.head, txq,
+ list_elem);
+ if (!pdev->tx_sched.last_used_txq)
+ pdev->tx_sched.last_used_txq = txq;
+ }
+ }
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+ } else {
+ used_credits = 0;
+ /* TODO: determine why no tx queue could be popped */
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "ol_tx_sched_select_batch_wrr_adv: error, no TXQ can be popped.");
+ }
+ return used_credits;
+}
+
+static inline void
+ol_tx_sched_txq_enqueue_wrr_adv(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid,
+ int frms,
+ int bytes)
+{
+ struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_sched_wrr_adv_category_info_t *category;
+
+ category = &scheduler->categories[pdev->tid_to_ac[tid]];
+ category->state.frms += frms;
+ category->state.bytes += bytes;
+ OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms);
+ if (txq->flag != ol_tx_queue_active) {
+ TAILQ_INSERT_TAIL(&category->state.head, txq, list_elem);
+ category->state.active = 1; /* may have already been active */
+ }
+}
+
+static inline void
+ol_tx_sched_txq_deactivate_wrr_adv(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid)
+{
+ struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_sched_wrr_adv_category_info_t *category;
+
+ category = &scheduler->categories[pdev->tid_to_ac[tid]];
+ category->state.frms -= txq->frms;
+ category->state.bytes -= txq->bytes;
+
+ TAILQ_REMOVE(&category->state.head, txq, list_elem);
+
+ if (category->state.frms == 0 && category->state.active)
+ category->state.active = 0;
+}
+
+ol_tx_frms_queue_list *
+ol_tx_sched_category_tx_queues_wrr_adv(struct ol_txrx_pdev_t *pdev, int cat)
+{
+ struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_sched_wrr_adv_category_info_t *category;
+
+ category = &scheduler->categories[cat];
+ return &category->state.head;
+}
+
+int
+ol_tx_sched_discard_select_category_wrr_adv(struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_tx_sched_wrr_adv_t *scheduler;
+ u_int8_t i, cat = 0;
+ int max_score = 0;
+
+ scheduler = pdev->tx_sched.scheduler;
+ /*
+ * Choose which category's tx frames to drop next based on two factors:
+ * 1. Which category has the most tx frames present
+ * 2. The category's priority (high-priority categories have a low
+ * discard_weight)
+ */
+ for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
+ int score;
+ score =
+ scheduler->categories[i].state.frms *
+ scheduler->categories[i].specs.discard_weight;
+ if (max_score == 0 || score > max_score) {
+ max_score = score;
+ cat = i;
+ }
+ }
+ return cat;
+}
+
+void
+ol_tx_sched_txq_discard_wrr_adv(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int cat, int frames, int bytes)
+{
+ struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_sched_wrr_adv_category_info_t *category;
+
+ category = &scheduler->categories[cat];
+
+ if (0 == txq->frms)
+ TAILQ_REMOVE(&category->state.head, txq, list_elem);
+
+
+ category->state.frms -= frames;
+ category->state.bytes -= bytes;
+ OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frames);
+ if (category->state.frms == 0)
+ category->state.active = 0;
+}
+
+void
+ol_tx_sched_category_info_wrr_adv(
+ struct ol_txrx_pdev_t *pdev,
+ int cat, int *active,
+ int *frms, int *bytes)
+{
+ struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+ struct ol_tx_sched_wrr_adv_category_info_t *category;
+
+ category = &scheduler->categories[cat];
+ *active = category->state.active;
+ *frms = category->state.frms;
+ *bytes = category->state.bytes;
+}
+
+void *
+ol_tx_sched_init_wrr_adv(
+ struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_tx_sched_wrr_adv_t *scheduler;
+ int i;
+
+ scheduler = qdf_mem_malloc(
+ sizeof(struct ol_tx_sched_wrr_adv_t));
+ if (scheduler == NULL)
+ return scheduler;
+
+ qdf_mem_zero(scheduler, sizeof(*scheduler));
+
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, scheduler);
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, scheduler);
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BE, scheduler);
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BK, scheduler);
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(NON_QOS_DATA, scheduler);
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(UCAST_MGMT, scheduler);
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(MCAST_DATA, scheduler);
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(MCAST_MGMT, scheduler);
+
+ for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
+ scheduler->categories[i].state.active = 0;
+ scheduler->categories[i].state.frms = 0;
+ /*scheduler->categories[i].state.bytes = 0;*/
+ TAILQ_INIT(&scheduler->categories[i].state.head);
+ /* init categories to not be skipped before
+ * their initial selection */
+ scheduler->categories[i].state.wrr_count =
+ scheduler->categories[i].specs.wrr_skip_weight - 1;
+ }
+
+ /*
+ * Init the order array - the initial ordering doesn't matter, as the
+ * order array will get reshuffled as data arrives.
+ */
+ for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++)
+ scheduler->order[i] = i;
+
+ return scheduler;
+}
+
+
+/* WMM parameters are supposed to be passed when associating with the AP.
+ * Based on AIFS + CWMin, this function maps each queue to one of the four
+ * default scheduler settings, i.e. VO, VI, BE, or BK.
+ */
+void
+ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+ struct ol_tx_wmm_param_t wmm_param)
+{
+ struct ol_tx_sched_wrr_adv_t def_cfg;
+ struct ol_tx_sched_wrr_adv_t *scheduler =
+ data_pdev->tx_sched.scheduler;
+ u_int32_t i, ac_selected;
+ u_int32_t weight[OL_TX_NUM_WMM_AC], default_edca[OL_TX_NUM_WMM_AC];
+
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, (&def_cfg));
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, (&def_cfg));
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BE, (&def_cfg));
+ OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BK, (&def_cfg));
+
+ /* default_edca = AIFS + CWMin */
+ default_edca[OL_TX_SCHED_WRR_ADV_CAT_VO] =
+ OL_TX_AIFS_DEFAULT_VO + OL_TX_CW_MIN_DEFAULT_VO;
+ default_edca[OL_TX_SCHED_WRR_ADV_CAT_VI] =
+ OL_TX_AIFS_DEFAULT_VI + OL_TX_CW_MIN_DEFAULT_VI;
+ default_edca[OL_TX_SCHED_WRR_ADV_CAT_BE] =
+ OL_TX_AIFS_DEFAULT_BE + OL_TX_CW_MIN_DEFAULT_BE;
+ default_edca[OL_TX_SCHED_WRR_ADV_CAT_BK] =
+ OL_TX_AIFS_DEFAULT_BK + OL_TX_CW_MIN_DEFAULT_BK;
+
+ weight[OL_TX_SCHED_WRR_ADV_CAT_VO] =
+ wmm_param.ac[OL_TX_WMM_AC_VO].aifs +
+ wmm_param.ac[OL_TX_WMM_AC_VO].cwmin;
+ weight[OL_TX_SCHED_WRR_ADV_CAT_VI] =
+ wmm_param.ac[OL_TX_WMM_AC_VI].aifs +
+ wmm_param.ac[OL_TX_WMM_AC_VI].cwmin;
+ weight[OL_TX_SCHED_WRR_ADV_CAT_BK] =
+ wmm_param.ac[OL_TX_WMM_AC_BK].aifs +
+ wmm_param.ac[OL_TX_WMM_AC_BK].cwmin;
+ weight[OL_TX_SCHED_WRR_ADV_CAT_BE] =
+ wmm_param.ac[OL_TX_WMM_AC_BE].aifs +
+ wmm_param.ac[OL_TX_WMM_AC_BE].cwmin;
+
+ for (i = 0; i < OL_TX_NUM_WMM_AC; i++) {
+ if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_VO] >= weight[i])
+ ac_selected = OL_TX_SCHED_WRR_ADV_CAT_VO;
+ else if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_VI] >= weight[i])
+ ac_selected = OL_TX_SCHED_WRR_ADV_CAT_VI;
+ else if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_BE] >= weight[i])
+ ac_selected = OL_TX_SCHED_WRR_ADV_CAT_BE;
+ else
+ ac_selected = OL_TX_SCHED_WRR_ADV_CAT_BK;
+
+ scheduler->categories[i].specs.wrr_skip_weight =
+ def_cfg.categories[ac_selected].specs.wrr_skip_weight;
+ scheduler->categories[i].specs.credit_threshold =
+ def_cfg.categories[ac_selected].specs.credit_threshold;
+ scheduler->categories[i].specs.send_limit =
+ def_cfg.categories[ac_selected].specs.send_limit;
+ scheduler->categories[i].specs.credit_reserve =
+ def_cfg.categories[ac_selected].specs.credit_reserve;
+ scheduler->categories[i].specs.discard_weight =
+ def_cfg.categories[ac_selected].specs.discard_weight;
+ }
+}
+
+#endif /* OL_TX_SCHED == OL_TX_SCHED_WRR_ADV */
+
+/*--- congestion control discard --------------------------------------------*/
+
+struct ol_tx_frms_queue_t *
+ol_tx_sched_discard_select_txq(
+ struct ol_txrx_pdev_t *pdev,
+ ol_tx_frms_queue_list *tx_queues)
+{
+ struct ol_tx_frms_queue_t *txq;
+ struct ol_tx_frms_queue_t *selected_txq = NULL;
+ int max_frms = 0;
+
+ /* return the tx queue with the most frames */
+ TAILQ_FOREACH(txq, tx_queues, list_elem) {
+ if (txq->frms > max_frms) {
+ max_frms = txq->frms;
+ selected_txq = txq;
+ }
+ }
+ return selected_txq;
+}
+
+u_int16_t
+ol_tx_sched_discard_select(
+ struct ol_txrx_pdev_t *pdev,
+ u_int16_t frms,
+ ol_tx_desc_list *tx_descs,
+ bool force)
+{
+ int cat;
+ struct ol_tx_frms_queue_t *txq;
+ int bytes;
+ u_int32_t credit;
+ struct ol_tx_sched_notify_ctx_t notify_ctx;
+
+	/*
+	 * First decide which category of traffic (e.g. TID or AC)
+	 * to discard from next.
+	 */
+ cat = ol_tx_sched_discard_select_category(pdev);
+
+ /* then decide which peer within this category to discard from next */
+ txq = ol_tx_sched_discard_select_txq(
+ pdev, ol_tx_sched_category_tx_queues(pdev, cat));
+ if (NULL == txq)
+		/* no more pending tx packets in the tx queue; exit the discard loop */
+		return 0;
+
+ if (force == false) {
+ /*
+ * Now decide how many frames to discard from this peer-TID.
+ * Don't discard more frames than the caller has specified.
+ * Don't discard more than a fixed quantum of frames at a time.
+ * Don't discard more than 50% of the queue's frames at a time,
+ * but if there's only 1 frame left, go ahead and discard it.
+ */
+#define OL_TX_DISCARD_QUANTUM 10
+ if (OL_TX_DISCARD_QUANTUM < frms)
+ frms = OL_TX_DISCARD_QUANTUM;
+
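+		/*
+		 * Example: with a caller limit of 10 frames and a queue
+		 * holding 9, the quantum leaves frms at 10 and the 50%
+		 * cap then reduces it to 9 >> 1 = 4 frames this pass.
+		 */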
+ if (txq->frms > 1 && frms >= (txq->frms >> 1))
+ frms = txq->frms >> 1;
+ }
+
+ /*
+ * Discard from the head of the queue, because:
+	 * 1. Front-dropping gives protocols like TCP that include ARQ
+ * an early notification of congestion.
+ * 2. For time-sensitive applications like RTP, the newest frames are
+ * most relevant.
+ */
+ credit = 10000; /* no credit limit */
+ frms = ol_tx_dequeue(pdev, txq, tx_descs, frms, &credit, &bytes);
+
+ notify_ctx.event = OL_TX_DISCARD_FRAMES;
+ notify_ctx.frames = frms;
+ notify_ctx.bytes = bytes;
+ notify_ctx.txq = txq;
+ notify_ctx.info.ext_tid = cat;
+ ol_tx_sched_notify(pdev, ¬ify_ctx);
+
+ TX_SCHED_DEBUG_PRINT("%s Tx Drop : %d\n", __func__, frms);
+ return frms;
+}
+
+/*--- scheduler framework ---------------------------------------------------*/
+
+/*
+ * The scheduler mutex spinlock has been acquired outside this function,
+ * so there is no need to take locks inside this function.
+ */
+void
+ol_tx_sched_notify(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_sched_notify_ctx_t *ctx)
+{
+ struct ol_tx_frms_queue_t *txq = ctx->txq;
+ int tid;
+
+ if (!pdev->tx_sched.scheduler)
+ return;
+
+ switch (ctx->event) {
+ case OL_TX_ENQUEUE_FRAME:
+ tid = ctx->info.tx_msdu_info->htt.info.ext_tid;
+ ol_tx_sched_txq_enqueue(pdev, txq, tid, 1, ctx->bytes);
+ break;
+ case OL_TX_DELETE_QUEUE:
+ tid = ctx->info.ext_tid;
+ if (txq->flag == ol_tx_queue_active)
+ ol_tx_sched_txq_deactivate(pdev, txq, tid);
+
+ break;
+ case OL_TX_PAUSE_QUEUE:
+ tid = ctx->info.ext_tid;
+ if (txq->flag == ol_tx_queue_active)
+ ol_tx_sched_txq_deactivate(pdev, txq, tid);
+
+ break;
+ case OL_TX_UNPAUSE_QUEUE:
+ tid = ctx->info.ext_tid;
+ if (txq->frms != 0)
+ ol_tx_sched_txq_enqueue(pdev, txq, tid,
+ txq->frms, txq->bytes);
+
+ break;
+ case OL_TX_DISCARD_FRAMES:
+ /* not necessarily TID, could be category */
+ tid = ctx->info.ext_tid;
+ ol_tx_sched_txq_discard(pdev, txq, tid,
+ ctx->frames, ctx->bytes);
+ break;
+ default:
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "Error: unknown sched notification (%d)\n",
+ ctx->event);
+ qdf_assert(0);
+ break;
+ }
+}
+
+#define OL_TX_MSDU_ID_STORAGE_ERR(ptr) (NULL == ptr)
+
+void
+ol_tx_sched_dispatch(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_sched_ctx *sctx)
+{
+ qdf_nbuf_t msdu, prev = NULL, head_msdu = NULL;
+ struct ol_tx_desc_t *tx_desc;
+
+ u_int16_t *msdu_id_storage;
+ u_int16_t msdu_id;
+ int num_msdus = 0;
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+ while (sctx->frms) {
+ tx_desc = TAILQ_FIRST(&sctx->head);
+ if (tx_desc == NULL) {
+			/* TODO: find out why this happens */
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: error, not enough tx_desc from sctx->head.\n",
+ __func__);
+ break;
+ }
+ msdu = tx_desc->netbuf;
+ TAILQ_REMOVE(&sctx->head, tx_desc, tx_desc_list_elem);
+ if (NULL == head_msdu)
+ head_msdu = msdu;
+
+ if (prev)
+ qdf_nbuf_set_next(prev, msdu);
+
+ prev = msdu;
+
+#ifndef ATH_11AC_TXCOMPACT
+ /*
+ * When the tx frame is downloaded to the target, there are two
+ * outstanding references:
+ * 1. The host download SW (HTT, HTC, HIF)
+ * This reference is cleared by the ol_tx_send_done callback
+ * functions.
+ * 2. The target FW
+ * This reference is cleared by the ol_tx_completion_handler
+ * function.
+ * It is extremely probable that the download completion is
+ * processed before the tx completion message. However, under
+ * exceptional conditions the tx completion may be processed
+		 * first. Thus, rather than assuming that reference (1) is
+		 * done before reference (2), explicit reference tracking
+		 * is needed.
+ * Double-increment the ref count to account for both references
+ * described above.
+ */
+ qdf_atomic_init(&tx_desc->ref_cnt);
+ qdf_atomic_inc(&tx_desc->ref_cnt);
+ qdf_atomic_inc(&tx_desc->ref_cnt);
+#endif
+
+		/* store the MSDU ID for each MSDU */
+ msdu_id = ol_tx_desc_id(pdev, tx_desc);
+ msdu_id_storage = ol_tx_msdu_id_storage(msdu);
+ if (OL_TX_MSDU_ID_STORAGE_ERR(msdu_id_storage)) {
+			/*
+			 * Send the prior frames as a batch, then send this
+			 * frame individually, then resume handling the
+			 * remaining frames.
+			 */
+ if (head_msdu)
+ ol_tx_send_batch(pdev, head_msdu, num_msdus);
+
+			prev = NULL;
+			head_msdu = NULL;
+ num_msdus = 0;
+
+ if (htt_tx_send_std(pdev->htt_pdev, msdu, msdu_id)) {
+ ol_tx_target_credit_incr(pdev, msdu);
+ ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
+ 1 /* error */);
+ }
+ } else {
+ *msdu_id_storage = msdu_id;
+ num_msdus++;
+ }
+ sctx->frms--;
+ }
+
+	/* send the batch of frames */
+ if (head_msdu)
+ ol_tx_send_batch(pdev, head_msdu, num_msdus);
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+void
+ol_tx_sched(struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_tx_sched_ctx sctx;
+ u_int32_t credit;
+
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+ if (pdev->tx_sched.tx_sched_status != ol_tx_scheduler_idle) {
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+ return;
+ }
+ pdev->tx_sched.tx_sched_status = ol_tx_scheduler_running;
+
+ ol_tx_sched_log(pdev);
+ /*adf_os_print("BEFORE tx sched:\n");*/
+ /*ol_tx_queues_display(pdev);*/
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+
+ TAILQ_INIT(&sctx.head);
+ sctx.frms = 0;
+
+ ol_tx_sched_select_init(pdev);
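+	/*
+	 * Build batches while target credit remains: each pass reserves,
+	 * under the queue lock, the credit its batch consumes, and the
+	 * loop ends once the selector cannot consume any more credit.
+	 */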
+ while (qdf_atomic_read(&pdev->target_tx_credit) > 0) {
+ int num_credits;
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+ credit = qdf_atomic_read(&pdev->target_tx_credit);
+ num_credits = ol_tx_sched_select_batch(pdev, &sctx, credit);
+ if (num_credits > 0) {
+#if DEBUG_HTT_CREDIT
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+ " <HTT> Decrease credit %d - %d = %d.\n",
+ qdf_atomic_read(&pdev->target_tx_credit),
+ num_credits,
+ qdf_atomic_read(&pdev->target_tx_credit) -
+ num_credits);
+#endif
+ qdf_atomic_add(-num_credits, &pdev->target_tx_credit);
+ }
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+
+ if (num_credits == 0)
+ break;
+ }
+ ol_tx_sched_dispatch(pdev, &sctx);
+
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+ /*adf_os_print("AFTER tx sched:\n");*/
+ /*ol_tx_queues_display(pdev);*/
+
+ pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+void *
+ol_tx_sched_attach(
+ struct ol_txrx_pdev_t *pdev)
+{
+ pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
+ return ol_tx_sched_init(pdev);
+}
+
+void
+ol_tx_sched_detach(
+ struct ol_txrx_pdev_t *pdev)
+{
+ if (pdev->tx_sched.scheduler) {
+ qdf_mem_free(pdev->tx_sched.scheduler);
+ pdev->tx_sched.scheduler = NULL;
+ }
+}
+
+/*--- debug functions -------------------------------------------------------*/
+
+#if defined(DEBUG_HL_LOGGING)
+
+static void
+ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
+{
+ u_int8_t *buf;
+ u_int32_t *active_bitmap;
+ int i, j, num_cats_active;
+ int active, frms, bytes;
+ int credit;
+
+ /* don't bother recording state if credit is zero */
+ credit = qdf_atomic_read(&pdev->target_tx_credit);
+ if (credit == 0)
+ return;
+
+ /*
+ * See how many TIDs are active, so queue state can be stored only
+ * for those TIDs.
+ * Do an initial iteration through all categories to see if any
+ * are active. Doing an extra iteration is inefficient, but
+ * efficiency is not a dominant concern when logging is enabled.
+ */
+ num_cats_active = 0;
+ for (i = 0; i < OL_TX_SCHED_NUM_CATEGORIES; i++) {
+ ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
+ if (active)
+ num_cats_active++;
+ }
+ /* don't bother recording state if there are no active queues */
+ if (num_cats_active == 0)
+ return;
+
+ ol_tx_queue_log_sched(pdev, credit, &num_cats_active,
+ &active_bitmap, &buf);
+
+ if (num_cats_active == 0)
+ return;
+
+ *active_bitmap = 0;
+ for (i = 0, j = 0;
+ i < OL_TX_SCHED_NUM_CATEGORIES && j < num_cats_active;
+ i++) {
+ u_int8_t *p;
+ ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
+ if (!active)
+ continue;
+
+ p = &buf[j*6];
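+		/* pack 16-bit frame count, then 32-bit byte count (LE) */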
+ p[0] = (frms >> 0) & 0xff;
+ p[1] = (frms >> 8) & 0xff;
+
+ p[2] = (bytes >> 0) & 0xff;
+ p[3] = (bytes >> 8) & 0xff;
+ p[4] = (bytes >> 16) & 0xff;
+ p[5] = (bytes >> 24) & 0xff;
+ j++;
+ *active_bitmap |= 1 << i;
+ }
+}
+
+#endif /* defined(DEBUG_HL_LOGGING) */
+
+void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
+{
+ OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(pdev->tx_sched.scheduler);
+}
+
+void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
+{
+ OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(pdev->tx_sched.scheduler);
+}
+
+void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
+{
+ OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(pdev->tx_sched.scheduler);
+}
+
+#endif /* defined(CONFIG_HL_SUPPORT) */
diff --git a/core/dp/txrx/ol_tx_sched.h b/core/dp/txrx/ol_tx_sched.h
new file mode 100644
index 0000000..cc5d48b
--- /dev/null
+++ b/core/dp/txrx/ol_tx_sched.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2012-2013, 2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_tx_sched.h
+ * @brief API definitions for the tx scheduler module within the data SW.
+ */
+#ifndef _OL_TX_SCHED__H_
+#define _OL_TX_SCHED__H_
+
+#include <qdf_types.h>
+
+enum ol_tx_queue_action {
+ OL_TX_ENQUEUE_FRAME,
+ OL_TX_DELETE_QUEUE,
+ OL_TX_PAUSE_QUEUE,
+ OL_TX_UNPAUSE_QUEUE,
+ OL_TX_DISCARD_FRAMES,
+};
+
+struct ol_tx_sched_notify_ctx_t {
+ int event;
+ struct ol_tx_frms_queue_t *txq;
+ union {
+ int ext_tid;
+ struct ol_txrx_msdu_info_t *tx_msdu_info;
+ } info;
+ int frames;
+ int bytes;
+};
+
+#if defined(CONFIG_HL_SUPPORT)
+
+void
+ol_tx_sched_notify(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_sched_notify_ctx_t *ctx);
+
+void
+ol_tx_sched(struct ol_txrx_pdev_t *pdev);
+
+u_int16_t
+ol_tx_sched_discard_select(
+ struct ol_txrx_pdev_t *pdev,
+ u_int16_t frms,
+ ol_tx_desc_list *tx_descs,
+ bool force);
+
+void *
+ol_tx_sched_attach(struct ol_txrx_pdev_t *pdev);
+
+void
+ol_tx_sched_detach(struct ol_txrx_pdev_t *pdev);
+
+void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev);
+
+void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev);
+
+void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev);
+
+#else
+
+static inline void
+ol_tx_sched_notify(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_sched_notify_ctx_t *ctx)
+{
+ return;
+}
+
+static inline void
+ol_tx_sched(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+static inline u_int16_t
+ol_tx_sched_discard_select(
+ struct ol_txrx_pdev_t *pdev,
+ u_int16_t frms,
+ ol_tx_desc_list *tx_descs,
+ bool force)
+{
+ return 0;
+}
+
+static inline void *
+ol_tx_sched_attach(struct ol_txrx_pdev_t *pdev)
+{
+ return NULL;
+}
+
+static inline void
+ol_tx_sched_detach(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+static inline void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+static inline void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+static inline void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+#endif /* defined(CONFIG_HL_SUPPORT) */
+
+#if defined(CONFIG_HL_SUPPORT) || defined(TX_CREDIT_RECLAIM_SUPPORT)
+/*
+ * HL needs to keep track of the amount of credit available to download
+ * tx frames to the target - the download scheduler decides when to
+ * download frames, and which frames to download, based on the credit
+ * availability.
+ * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
+ * of the target_tx_credit, to determine when to poll for tx completion
+ * messages.
+ */
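+/*
+ * For example (as used elsewhere in this patch), the tx send path
+ * charges credit as each frame is downloaded and calls
+ * ol_tx_target_credit_incr() to refund it when a download fails or
+ * a frame is rejected.
+ */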
+
+static inline void
+ol_tx_target_credit_adjust(int factor,
+ struct ol_txrx_pdev_t *pdev,
+ qdf_nbuf_t msdu)
+{
+ qdf_atomic_add(factor * htt_tx_msdu_credit(msdu),
+ &pdev->target_tx_credit);
+}
+
+static inline void ol_tx_target_credit_decr(struct ol_txrx_pdev_t *pdev,
+ qdf_nbuf_t msdu)
+{
+ ol_tx_target_credit_adjust(-1, pdev, msdu);
+}
+
+static inline void ol_tx_target_credit_incr(struct ol_txrx_pdev_t *pdev,
+ qdf_nbuf_t msdu)
+{
+ ol_tx_target_credit_adjust(1, pdev, msdu);
+}
+#else
+/*
+ * LL does not need to keep track of target credit.
+ * Since the host tx descriptor pool size matches the target's,
+ * we know the target has space for the new tx frame if the host's
+ * tx descriptor allocation succeeded.
+ */
+static inline void
+ol_tx_target_credit_adjust(int factor,
+ struct ol_txrx_pdev_t *pdev,
+ qdf_nbuf_t msdu)
+{
+ return;
+}
+
+static inline void ol_tx_target_credit_decr(struct ol_txrx_pdev_t *pdev,
+ qdf_nbuf_t msdu)
+{
+ return;
+}
+
+static inline void ol_tx_target_credit_incr(struct ol_txrx_pdev_t *pdev,
+ qdf_nbuf_t msdu)
+{
+ return;
+}
+#endif
+#endif /* _OL_TX_SCHED__H_ */
diff --git a/core/dp/txrx/ol_tx_send.c b/core/dp/txrx/ol_tx_send.c
index b381d1f4..9bfbc47 100644
--- a/core/dp/txrx/ol_tx_send.c
+++ b/core/dp/txrx/ol_tx_send.c
@@ -47,15 +47,20 @@
#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc */
#include <ol_tx_desc.h> /* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
+#include <ol_tx_classify.h> /* ol_tx_dest_addr_find */
#endif
#include <ol_txrx_internal.h> /* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h> /* ol_tx_reinject */
#include <ol_cfg.h> /* ol_cfg_is_high_latency */
+#include <ol_tx_sched.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h> /* OL_TX_RESTORE_HDR, etc */
#endif
+#include <ol_tx_queue.h>
+#include <ol_txrx.h>
+
#ifdef TX_CREDIT_RECLAIM_SUPPORT
@@ -73,7 +78,8 @@
#endif /* TX_CREDIT_RECLAIM_SUPPORT */
-#if defined(TX_CREDIT_RECLAIM_SUPPORT)
+#if defined(CONFIG_HL_SUPPORT) || defined(TX_CREDIT_RECLAIM_SUPPORT)
+
/*
* HL needs to keep track of the amount of credit available to download
* tx frames to the target - the download scheduler decides when to
@@ -83,53 +89,87 @@
* of the target_tx_credit, to determine when to poll for tx completion
* messages.
*/
-#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu) \
- qdf_atomic_add( \
- factor * htt_tx_msdu_credit(msdu), &pdev->target_tx_credit)
-#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu) \
- OL_TX_TARGET_CREDIT_ADJUST(-1, pdev, msdu)
-#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu) \
- OL_TX_TARGET_CREDIT_ADJUST(1, pdev, msdu)
-#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta) \
- qdf_atomic_add(-1 * delta, &pdev->target_tx_credit)
-#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta) \
- qdf_atomic_add(delta, &pdev->target_tx_credit)
+static inline void
+ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
+{
+ qdf_atomic_add(-1 * delta, &pdev->target_tx_credit);
+}
+
+static inline void
+ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
+{
+ qdf_atomic_add(delta, &pdev->target_tx_credit);
+}
#else
-/*
- * LL does not need to keep track of target credit.
- * Since the host tx descriptor pool size matches the target's,
- * we know the target has space for the new tx frame if the host's
- * tx descriptor allocation succeeded.
- */
-#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu) /* no-op */
-#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu) /* no-op */
-#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu) /* no-op */
-#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta) /* no-op */
-#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta) /* no-op */
+
+static inline void
+ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
+{
+ return;
+}
+
+static inline void
+ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
+{
+ return;
+}
#endif
-#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
-#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev) \
- do { \
- struct ol_txrx_vdev_t *vdev; \
- TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { \
- if (qdf_atomic_read(&vdev->os_q_paused) && \
- (vdev->tx_fl_hwm != 0)) { \
- qdf_spin_lock(&pdev->tx_mutex); \
- if (pdev->tx_desc.num_free > \
- vdev->tx_fl_hwm) { \
- qdf_atomic_set(&vdev->os_q_paused, 0); \
- qdf_spin_unlock(&pdev->tx_mutex); \
- ol_txrx_flow_control_cb(vdev, true);\
- } \
- else { \
- qdf_spin_unlock(&pdev->tx_mutex); \
- } \
- } \
- } \
- } while (0)
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
+
+/**
+ * ol_tx_flow_ct_unpause_os_q() - Unpause OS Q
+ * @pdev: physical device object
+ *
+ *
+ */
+static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
+{
+ struct ol_txrx_vdev_t *vdev;
+ TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+ if (qdf_atomic_read(&vdev->os_q_paused) &&
+ (vdev->tx_fl_hwm != 0)) {
+ qdf_spin_lock(&pdev->tx_mutex);
+ if (pdev->tx_desc.num_free > vdev->tx_fl_hwm) {
+ qdf_atomic_set(&vdev->os_q_paused, 0);
+ qdf_spin_unlock(&pdev->tx_mutex);
+ ol_txrx_flow_control_cb(vdev, true);
+ } else {
+ qdf_spin_unlock(&pdev->tx_mutex);
+ }
+ }
+ }
+}
+#elif defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)
+
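+/**
+ * ol_tx_flow_ct_unpause_os_q() - Unpause OS Q (HL, per-vdev tx desc pools)
+ * @pdev: physical device object
+ *
+ * Resume a vdev's OS queues once its share of the HL descriptor pool
+ * (half the pool minus the mgmt reservation), less its outstanding
+ * descriptors, rises above the vdev's high watermark.
+ *
+ * Return: None
+ */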
+static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
+{
+ struct ol_txrx_vdev_t *vdev;
+ TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+ if (qdf_atomic_read(&vdev->os_q_paused) &&
+ (vdev->tx_fl_hwm != 0)) {
+ qdf_spin_lock(&pdev->tx_mutex);
+ if (((ol_tx_desc_pool_size_hl(
+ vdev->pdev->ctrl_pdev) >> 1)
+ - TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED)
+ - qdf_atomic_read(&vdev->tx_desc_count)
+ > vdev->tx_fl_hwm) {
+ qdf_atomic_set(&vdev->os_q_paused, 0);
+ qdf_spin_unlock(&pdev->tx_mutex);
+ vdev->osif_flow_control_cb(vdev, true);
+ } else {
+ qdf_spin_unlock(&pdev->tx_mutex);
+ }
+ }
+ }
+}
#else
-#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev)
+
+static inline void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
+{
+ return;
+}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
static inline uint16_t
@@ -145,7 +185,7 @@
qdf_nbuf_len(msdu));
msdu_credit_consumed = htt_tx_msdu_credit(msdu);
- OL_TX_TARGET_CREDIT_DECR_INT(pdev, msdu_credit_consumed);
+ ol_tx_target_credit_decr_int(pdev, msdu_credit_consumed);
OL_TX_CREDIT_RECLAIM(pdev);
/*
@@ -190,7 +230,7 @@
vdev_id));
failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
if (qdf_unlikely(failed)) {
- OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
+ ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
}
}
@@ -212,7 +252,7 @@
msdu_id_storage = ol_tx_msdu_id_storage(rejected);
tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);
- OL_TX_TARGET_CREDIT_INCR(pdev, rejected);
+ ol_tx_target_credit_incr(pdev, rejected);
ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
rejected = next;
@@ -235,7 +275,7 @@
if (failed) {
TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
"Error: freeing tx frame after htt_tx failed");
- OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
+ ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
}
}
@@ -266,7 +306,7 @@
}
if (status != A_OK) {
- OL_TX_TARGET_CREDIT_INCR(pdev, msdu);
+ ol_tx_target_credit_incr(pdev, msdu);
ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
1 /* download err */);
} else {
@@ -340,9 +380,15 @@
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
enum htt_tx_status status,
uint16_t *desc_ids, int num_msdus);
-#define OL_TX_DELAY_COMPUTE ol_tx_delay_compute
+
#else
-#define OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus) /* no-op */
+static inline void
+ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
+ enum htt_tx_status status,
+ uint16_t *desc_ids, int num_msdus)
+{
+ return;
+}
#endif /* QCA_COMPUTE_TX_DELAY */
#ifndef OL_TX_RESTORE_HDR
@@ -469,8 +515,11 @@
{
ol_tx_target_credit_update(pdev, credits);
+ if (pdev->cfg.is_high_latency)
+ ol_tx_sched(pdev);
+
/* UNPAUSE OS Q */
- OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
+ ol_tx_flow_ct_unpause_os_q(pdev);
}
/* WARNING: ol_tx_inspect_handler()'s bahavior is similar to that of
@@ -495,7 +544,7 @@
ol_tx_desc_list tx_descs;
TAILQ_INIT(&tx_descs);
- OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus);
+ ol_tx_delay_compute(pdev, status, desc_ids, num_msdus);
for (i = 0; i < num_msdus; i++) {
tx_desc_id = desc_ids[i];
@@ -507,6 +556,7 @@
qdf_nbuf_data_addr(netbuf),
sizeof(qdf_nbuf_data(netbuf)), tx_desc->id, status));
qdf_runtime_pm_put();
+ ol_tx_desc_update_group_credit(pdev, tx_desc_id, 1, 0, status);
/* Per SDU update of byte count */
byte_cnt += qdf_nbuf_len(netbuf);
if (OL_TX_DESC_NO_REFS(tx_desc)) {
@@ -540,14 +590,158 @@
status != htt_tx_status_ok);
}
- OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);
+ if (pdev->cfg.is_high_latency) {
+ /*
+ * Credit was already explicitly updated by HTT,
+ * but update the number of available tx descriptors,
+ * then invoke the scheduler, since new credit is probably
+ * available now.
+ */
+ qdf_atomic_add(num_msdus, &pdev->tx_queue.rsrc_cnt);
+ ol_tx_sched(pdev);
+ } else {
+ ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
+ }
/* UNPAUSE OS Q */
- OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
+ ol_tx_flow_ct_unpause_os_q(pdev);
/* Do one shot statistics */
TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
}
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+void ol_tx_desc_update_group_credit(ol_txrx_pdev_handle pdev,
+ u_int16_t tx_desc_id, int credit, u_int8_t absolute,
+ enum htt_tx_status status)
+{
+ uint8_t i, is_member;
+ uint16_t vdev_id_mask;
+ struct ol_tx_desc_t *tx_desc;
+
+ tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
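+
+	/* credit the first txq group whose membership includes this vdev */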
+ for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
+ vdev_id_mask =
+ OL_TXQ_GROUP_VDEV_ID_MASK_GET(
+ pdev->txq_grps[i].membership);
+ is_member = OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_id_mask,
+ tx_desc->vdev->vdev_id);
+ if (is_member) {
+ ol_txrx_update_group_credit(&pdev->txq_grps[i],
+ credit, absolute);
+ break;
+ }
+ }
+ ol_tx_update_group_credit_stats(pdev);
+}
+
+#ifdef DEBUG_HL_LOGGING
+
+void ol_tx_update_group_credit_stats(ol_txrx_pdev_handle pdev)
+{
+ uint16_t curr_index;
+ uint8_t i;
+
+ qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
+ pdev->grp_stats.last_valid_index++;
+ if (pdev->grp_stats.last_valid_index > (OL_TX_GROUP_STATS_LOG_SIZE
+ - 1)) {
+ pdev->grp_stats.last_valid_index -= OL_TX_GROUP_STATS_LOG_SIZE;
+ pdev->grp_stats.wrap_around = 1;
+ }
+ curr_index = pdev->grp_stats.last_valid_index;
+
+ for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
+ pdev->grp_stats.stats[curr_index].grp[i].member_vdevs =
+ OL_TXQ_GROUP_VDEV_ID_MASK_GET(
+ pdev->txq_grps[i].membership);
+ pdev->grp_stats.stats[curr_index].grp[i].credit =
+ qdf_atomic_read(&pdev->txq_grps[i].credit);
+ }
+
+ qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
+}
+
+void ol_tx_dump_group_credit_stats(ol_txrx_pdev_handle pdev)
+{
+ uint16_t i, j, is_break = 0;
+ int16_t curr_index, old_index, wrap_around;
+ uint16_t curr_credit, old_credit, mem_vdevs;
+
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "Group credit stats:");
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ " No: GrpID: Credit: Change: vdev_map");
+
+ qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
+ curr_index = pdev->grp_stats.last_valid_index;
+ wrap_around = pdev->grp_stats.wrap_around;
+ qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
+
+ if (curr_index < 0) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "Not initialized");
+ return;
+ }
+
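+	/*
+	 * Walk backwards from the most recent record; at index 0, either
+	 * stop (the log never wrapped) or continue from the end of the ring.
+	 */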
+ for (i = 0; i < OL_TX_GROUP_STATS_LOG_SIZE; i++) {
+ old_index = curr_index - 1;
+ if (old_index < 0) {
+ if (wrap_around == 0)
+ is_break = 1;
+ else
+ old_index = OL_TX_GROUP_STATS_LOG_SIZE - 1;
+ }
+
+ for (j = 0; j < OL_TX_MAX_TXQ_GROUPS; j++) {
+ qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
+ curr_credit =
+ pdev->grp_stats.stats[curr_index].
+ grp[j].credit;
+ if (!is_break)
+ old_credit =
+ pdev->grp_stats.stats[old_index].
+ grp[j].credit;
+
+ mem_vdevs =
+ pdev->grp_stats.stats[curr_index].grp[j].
+ member_vdevs;
+ qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
+
+ if (!is_break)
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "%4d: %5d: %6d %6d %8x",
+ curr_index, j,
+ curr_credit,
+ (curr_credit - old_credit),
+ mem_vdevs);
+ else
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "%4d: %5d: %6d %6s %8x",
+ curr_index, j,
+ curr_credit, "NA", mem_vdevs);
+ }
+
+ if (is_break)
+ break;
+
+ curr_index = old_index;
+ }
+}
+
+void ol_tx_clear_group_credit_stats(ol_txrx_pdev_handle pdev)
+{
+ qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
+ qdf_mem_zero(&pdev->grp_stats, sizeof(pdev->grp_stats));
+ pdev->grp_stats.last_valid_index = -1;
+ pdev->grp_stats.wrap_around = 0;
+ qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
+}
+#endif
+#endif
+
/*
* ol_tx_single_completion_handler performs the same tx completion
* processing as ol_tx_completion_handler, but for a single frame.
@@ -581,8 +775,18 @@
qdf_atomic_read(&pdev->target_tx_credit),
1, qdf_atomic_read(&pdev->target_tx_credit) + 1);
-
- qdf_atomic_add(1, &pdev->target_tx_credit);
+ if (pdev->cfg.is_high_latency) {
+ /*
+ * Credit was already explicitly updated by HTT,
+ * but update the number of available tx descriptors,
+ * then invoke the scheduler, since new credit is probably
+ * available now.
+ */
+ qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
+ ol_tx_sched(pdev);
+ } else {
+ qdf_atomic_add(1, &pdev->target_tx_credit);
+ }
}
/* WARNING: ol_tx_inspect_handler()'s bahavior is similar to that of
@@ -649,7 +853,12 @@
qdf_atomic_read(&pdev->target_tx_credit) +
num_msdus);
- OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);
+ if (pdev->cfg.is_high_latency) {
+ /* credit was already explicitly updated by HTT */
+ ol_tx_sched(pdev);
+ } else {
+ ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
+ }
}
#ifdef QCA_COMPUTE_TX_DELAY
@@ -777,31 +986,6 @@
}
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
-static inline uint8_t *ol_tx_dest_addr_find(struct ol_txrx_pdev_t *pdev,
- qdf_nbuf_t tx_nbuf)
-{
- uint8_t *hdr_ptr;
- void *datap = qdf_nbuf_data(tx_nbuf);
-
- if (pdev->frame_format == wlan_frm_fmt_raw) {
- /* adjust hdr_ptr to RA */
- struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
- hdr_ptr = wh->i_addr1;
- } else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
- /* adjust hdr_ptr to RA */
- struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
- hdr_ptr = wh->i_addr1;
- } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
- hdr_ptr = datap;
- } else {
- QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "Invalid standard frame type: %d",
- pdev->frame_format);
- qdf_assert(0);
- hdr_ptr = NULL;
- }
- return hdr_ptr;
-}
static uint8_t
ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
diff --git a/core/dp/txrx/ol_tx_send.h b/core/dp/txrx/ol_tx_send.h
index d7d6072..460fe71 100644
--- a/core/dp/txrx/ol_tx_send.h
+++ b/core/dp/txrx/ol_tx_send.h
@@ -35,11 +35,20 @@
#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <cdp_txrx_cmn.h> /* ol_txrx_vdev_t, etc. */
+#if defined(CONFIG_HL_SUPPORT)
+
+static inline void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
+{
+ return;
+}
+#else
+
/**
* @flush the ol tx when surprise remove.
*
*/
void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev);
+#endif
/**
* @brief Send a tx frame to the target.
diff --git a/core/dp/txrx/ol_txrx.c b/core/dp/txrx/ol_txrx.c
index 0a4b6f7..09cffd3 100644
--- a/core/dp/txrx/ol_txrx.c
+++ b/core/dp/txrx/ol_txrx.c
@@ -68,6 +68,7 @@
#include <ol_tx_send.h> /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h> /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
+#include <ol_tx_sched.h> /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_ipa.h>
@@ -80,6 +81,132 @@
#include <cds_concurrency.h>
#include "epping_main.h"
+#ifdef CONFIG_HL_SUPPORT
+
+/**
+ * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
+ * @vdev: the data virtual device
+ * @bss_addr: bss address
+ *
+ * Return: None
+ */
+void
+ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr)
+{
+ if (bss_addr && vdev->last_real_peer &&
+ (qdf_mem_cmp((u8 *)bss_addr,
+ vdev->last_real_peer->mac_addr.raw,
+ IEEE80211_ADDR_LEN) == 0))
+ qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
+ vdev->last_real_peer->mac_addr.raw,
+ OL_TXRX_MAC_ADDR_LEN);
+}
+
+/**
+ * ol_txrx_add_last_real_peer() - add last peer
+ * @pdev: the data physical device
+ * @vdev: virtual device
+ * @peer_id: peer id
+ *
+ * Return: None
+ */
+void
+ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
+ ol_txrx_vdev_handle vdev,
+ uint8_t *peer_id)
+{
+ ol_txrx_peer_handle peer;
+ if (vdev->last_real_peer == NULL) {
+ peer = ol_txrx_find_peer_by_addr(pdev,
+ vdev->hl_tdls_ap_mac_addr.raw,
+ peer_id);
+ if (peer && (peer->peer_ids[0] !=
+ HTT_INVALID_PEER_ID))
+ vdev->last_real_peer = peer;
+ }
+}
+
+/**
+ * is_vdev_restore_last_peer() - check whether the peer is the vdev's
+ * last real peer
+ * @peer: peer object
+ *
+ * Return: true if the peer is the vdev's last real peer
+ */
+bool
+is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer)
+{
+ struct ol_txrx_vdev_t *vdev;
+ vdev = peer->vdev;
+ return vdev->last_real_peer && (vdev->last_real_peer == peer);
+}
+
+/**
+ * ol_txrx_update_last_real_peer() - restore the vdev's last real peer
+ * @pdev: the data physical device
+ * @peer: peer device
+ * @peer_id: peer id
+ * @restore_last_peer: restore last peer flag
+ *
+ * Return: None
+ */
+void
+ol_txrx_update_last_real_peer(
+ ol_txrx_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ uint8_t *peer_id, bool restore_last_peer)
+{
+ struct ol_txrx_vdev_t *vdev;
+ vdev = peer->vdev;
+ if (restore_last_peer && (vdev->last_real_peer == NULL)) {
+ peer = ol_txrx_find_peer_by_addr(pdev,
+ vdev->hl_tdls_ap_mac_addr.raw, peer_id);
+ if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
+ vdev->last_real_peer = peer;
+ }
+}
+#endif
+
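+/**
+ * ol_tx_desc_pool_size_hl() - size the tx descriptor pool for HL systems
+ * @ctrl_pdev: the control pdev handle
+ *
+ * Return: the number of tx descriptors to allocate, clamped to the
+ *	   configured HL minimum and maximum pool sizes
+ */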
+u_int16_t
+ol_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev)
+{
+ u_int16_t desc_pool_size;
+ u_int16_t steady_state_tx_lifetime_ms;
+ u_int16_t safety_factor;
+
+ /*
+ * Steady-state tx latency:
+ * roughly 1-2 ms flight time
+ * + roughly 1-2 ms prep time,
+ * + roughly 1-2 ms target->host notification time.
+ * = roughly 6 ms total
+ * Thus, steady state number of frames =
+ * steady state max throughput / frame size * tx latency, e.g.
+ * 1 Gbps / 1500 bytes * 6 ms = 500
+ *
+ */
+ steady_state_tx_lifetime_ms = 6;
+
+ safety_factor = 8;
+
+ desc_pool_size =
+ ol_cfg_max_thruput_mbps(ctrl_pdev) *
+ 1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
+ (8 * OL_TX_AVG_FRM_BYTES) *
+ steady_state_tx_lifetime_ms *
+ safety_factor;
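+
+	/*
+	 * Worked example (assuming OL_TX_AVG_FRM_BYTES is 1500): at
+	 * 100 Mbps, 100 * 1000 / 12000 = 8, then 8 * 6 * 8 = 384
+	 * descriptors, before the min/max clamping below.
+	 */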
+
+ /* minimum */
+ if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
+ desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;
+
+ /* maximum */
+ if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
+ desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;
+
+ return desc_pool_size;
+}
/*=== function definitions ===*/
@@ -137,7 +264,7 @@
{
if (!peer) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "peer argument is null!!");
+ "peer argument is null!!");
return QDF_STATUS_E_FAILURE;
}
@@ -159,14 +286,14 @@
pdev = cds_get_context(QDF_MODULE_ID_TXRX);
if (!pdev) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "PDEV not found for sta_id [%d]", sta_id);
+ "PDEV not found for sta_id [%d]", sta_id);
return NULL;
}
peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
if (!peer) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "PEER [%d] not found", sta_id);
+ "PEER [%d] not found", sta_id);
return NULL;
}
@@ -290,6 +417,105 @@
#define ol_txrx_local_peer_id_cleanup(pdev) /* no-op */
#endif
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+/**
+ * ol_txrx_update_group_credit() - update group credit for tx queue
+ * @group: the tx queue group whose credit is to be updated
+ * @credit: the credit value - a delta, or an absolute value when
+ *	    @absolute is set
+ * @absolute: when set, overwrite the group credit rather than adjust it
+ *
+ * Return: None
+ */
+void ol_txrx_update_group_credit(
+ struct ol_tx_queue_group_t *group,
+ int32_t credit,
+ u_int8_t absolute)
+{
+ if (absolute)
+ qdf_atomic_set(&group->credit, credit);
+ else
+ qdf_atomic_add(credit, &group->credit);
+}
+
+/**
+ * ol_txrx_update_tx_queue_groups() - update the vdev tx queue group if
+ *                                    the vdev id mask and ac mask do not match
+ * @pdev: the data physical device
+ * @group_id: TXQ group id
+ * @credit: TXQ group credit count
+ * @absolute: TXQ group absolute
+ * @vdev_id_mask: TXQ vdev group id mask
+ * @ac_mask: TXQ access category mask
+ *
+ * Return: None
+ */
+void ol_txrx_update_tx_queue_groups(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t group_id,
+ int32_t credit,
+ u_int8_t absolute,
+ u_int32_t vdev_id_mask,
+ u_int32_t ac_mask
+ )
+{
+ struct ol_tx_queue_group_t *group;
+ u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
+ u_int32_t membership;
+ struct ol_txrx_vdev_t *vdev;
+ group = &pdev->txq_grps[group_id];
+
+ membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);
+
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+ /*
+	 * If the membership (vdev id mask and ac mask) matches,
+	 * there is no need to update the tx queue groups.
+ */
+ if (group->membership == membership)
+ /* Update Credit Only */
+ goto credit_update;
+
+ /*
+ * membership (vdev id mask and ac mask) is not matching
+ * TODO: ignoring ac mask for now
+ */
+ group_vdev_id_mask =
+ OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
+
+ TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+ group_vdev_bit_mask =
+ OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
+ group_vdev_id_mask, vdev->vdev_id);
+ vdev_bit_mask =
+ OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
+ vdev_id_mask, vdev->vdev_id);
+
+ if (group_vdev_bit_mask != vdev_bit_mask) {
+ /*
+ * Change in vdev tx queue group
+ */
+ if (!vdev_bit_mask) {
+ /* Set Group Pointer (vdev and peer) to NULL */
+ ol_tx_set_vdev_group_ptr(
+ pdev, vdev->vdev_id, NULL);
+ } else {
+ /* Set Group Pointer (vdev and peer) */
+ ol_tx_set_vdev_group_ptr(
+ pdev, vdev->vdev_id, group);
+ }
+ }
+ }
+ /* Update membership */
+ group->membership = membership;
+credit_update:
+ /* Update Credit */
+ ol_txrx_update_group_credit(group, credit, absolute);
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+}
+#endif
+
#ifdef WLAN_FEATURE_FASTPATH
/**
* setup_fastpath_ce_handles() Update pdev with ce_handle for fastpath use.
@@ -404,6 +630,305 @@
#endif
+#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)
+
+/**
+ * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
+ * margin replenishment
+ * @desc_pool_size: tx desc pool size
+ *
+ * Return: threshold low
+ */
+static inline uint16_t
+ol_txrx_rsrc_threshold_lo(int desc_pool_size)
+{
+ int threshold_low;
+ /*
+	 * A 5% margin of unallocated descriptors is too much for the
+	 * per-vdev mechanism, so define the value separately.
+ */
+ threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;
+
+ return threshold_low;
+}
+
+/**
+ * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
+ * during tx desc margin replenishment
+ * @desc_pool_size: tx desc pool size
+ *
+ * Return: threshold high
+ */
+static inline uint16_t
+ol_txrx_rsrc_threshold_hi(int desc_pool_size)
+{
+ int threshold_high;
+ /* when freeing up descriptors,
+ * keep going until there's a 7.5% margin
+ */
+ threshold_high = ((15 * desc_pool_size)/100)/2;
+
+ return threshold_high;
+}
+#else
+
+static inline uint16_t
+ol_txrx_rsrc_threshold_lo(int desc_pool_size)
+{
+ int threshold_low;
+ /* always maintain a 5% margin of unallocated descriptors */
+ threshold_low = (5 * desc_pool_size)/100;
+
+ return threshold_low;
+}
+
+static inline uint16_t
+ol_txrx_rsrc_threshold_hi(int desc_pool_size)
+{
+ int threshold_high;
+ /* when freeing up descriptors, keep going until
+ * there's a 15% margin
+ */
+ threshold_high = (15 * desc_pool_size)/100;
+
+ return threshold_high;
+}
+#endif
+
+#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
+
+/**
+ * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
+ * @pdev: the physical device object
+ *
+ * Return: None
+ */
+static void
+ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
+{
+ qdf_spinlock_create(&pdev->txq_log_spinlock);
+ pdev->txq_log.size = OL_TXQ_LOG_SIZE;
+ pdev->txq_log.oldest_record_offset = 0;
+ pdev->txq_log.offset = 0;
+ pdev->txq_log.allow_wrap = 1;
+ pdev->txq_log.wrapped = 0;
+}
+
+/**
+ * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
+ * @pdev: the physical device object
+ *
+ * Return: None
+ */
+static inline void
+ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
+{
+ qdf_spinlock_destroy(&pdev->txq_log_spinlock);
+}
+
+#else
+
+static inline void
+ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+static inline void
+ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+
+#endif
+
+#if defined(DEBUG_HL_LOGGING)
+
+/**
+ * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
+ * @pdev: the physical device object
+ *
+ * Return: None
+ */
+static inline void
+ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
+{
+ qdf_spinlock_create(&pdev->grp_stat_spinlock);
+ pdev->grp_stats.last_valid_index = -1;
+ pdev->grp_stats.wrap_around = 0;
+}
+
+/**
+ * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
+ * @pdev: the physical device object
+ *
+ * Return: None
+ */
+static inline void
+ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
+{
+ qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
+}
+#else
+
+static inline void
+ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+static inline void
+ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+#endif
+
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+
+/**
+ * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
+ * @vdev: the virtual device object
+ * @flag: flag
+ *
+ * Return: None
+ */
+void
+ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag)
+{
+ vdev->hlTdlsFlag = flag;
+}
+#endif
+
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
+ * @vdev: the virtual device object
+ *
+ * Return: None
+ */
+static void
+ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
+{
+ u_int8_t i;
+ for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
+ TAILQ_INIT(&vdev->txqs[i].head);
+ vdev->txqs[i].paused_count.total = 0;
+ vdev->txqs[i].frms = 0;
+ vdev->txqs[i].bytes = 0;
+ vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
+ vdev->txqs[i].flag = ol_tx_queue_empty;
+ /* aggregation is not applicable for vdev tx queues */
+ vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
+ ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
+ ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
+ }
+}
+
+/**
+ * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
+ * @vdev: the virtual device object
+ *
+ * Return: None
+ */
+static void
+ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ struct ol_tx_frms_queue_t *txq;
+ int i;
+
+ for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
+ txq = &vdev->txqs[i];
+ ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS));
+ }
+}
+
+/**
+ * ol_txrx_peer_txqs_init() - initialise peer tx queues
+ * @pdev: the physical device object
+ * @peer: peer object
+ *
+ * Return: None
+ */
+static void
+ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer)
+{
+ uint8_t i;
+ struct ol_txrx_vdev_t *vdev = peer->vdev;
+ qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+ for (i = 0; i < OL_TX_NUM_TIDS; i++) {
+ TAILQ_INIT(&peer->txqs[i].head);
+ peer->txqs[i].paused_count.total = 0;
+ peer->txqs[i].frms = 0;
+ peer->txqs[i].bytes = 0;
+ peer->txqs[i].ext_tid = i;
+ peer->txqs[i].flag = ol_tx_queue_empty;
+ peer->txqs[i].aggr_state = ol_tx_aggr_untried;
+ ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
+ ol_txrx_set_txq_peer(&peer->txqs[i], peer);
+ }
+ qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+
+ /* aggregation is not applicable for mgmt and non-QoS tx queues */
+ for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
+ peer->txqs[i].aggr_state = ol_tx_aggr_disabled;
+
+ ol_txrx_peer_pause(peer);
+}
+
+/**
+ * ol_txrx_peer_tx_queue_free() - free peer tx queues
+ * @pdev: the physical device object
+ * @peer: peer object
+ *
+ * Return: None
+ */
+static void
+ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer)
+{
+ struct ol_tx_frms_queue_t *txq;
+ uint8_t i;
+
+ for (i = 0; i < OL_TX_NUM_TIDS; i++) {
+ txq = &peer->txqs[i];
+ ol_tx_queue_free(pdev, txq, i);
+ }
+}
+#else
+
+static inline void
+ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
+{
+ return;
+}
+
+static inline void
+ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
+{
+ return;
+}
+
+static inline void
+ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer)
+{
+ return;
+}
+
+static inline void
+ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer)
+{
+ return;
+}
+#endif
+
/**
* ol_txrx_pdev_attach() - allocate txrx pdev
* @ctrl_pdev: cfg pdev
@@ -425,6 +950,8 @@
goto fail0;
qdf_mem_zero(pdev, sizeof(*pdev));
+ /* init LL/HL cfg here */
+ pdev->cfg.is_high_latency = ol_cfg_is_high_latency(ctrl_pdev);
pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);
/* store provided params */
@@ -442,16 +969,33 @@
if (ol_txrx_peer_find_attach(pdev))
goto fail1;
+ /* initialize the counter of the target's tx buffer availability */
+ qdf_atomic_init(&pdev->target_tx_credit);
+ qdf_atomic_init(&pdev->orig_target_tx_credit);
+
+ if (ol_cfg_is_high_latency(ctrl_pdev)) {
+ qdf_spinlock_create(&pdev->tx_queue_spinlock);
+ pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
+ if (pdev->tx_sched.scheduler == NULL)
+ goto fail2;
+ }
+ ol_txrx_pdev_txq_log_init(pdev);
+ ol_txrx_pdev_grp_stats_init(pdev);
+
pdev->htt_pdev =
htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
if (!pdev->htt_pdev)
- goto fail2;
+ goto fail3;
return pdev;
-fail2:
+fail3:
ol_txrx_peer_find_detach(pdev);
+fail2:
+ if (ol_cfg_is_high_latency(ctrl_pdev))
+ qdf_spinlock_destroy(&pdev->tx_queue_spinlock);
+
fail1:
qdf_mem_free(pdev);
@@ -538,18 +1082,31 @@
* run out of tx descriptors.
*/
- /* initialize the counter of the target's tx buffer availability */
- qdf_atomic_init(&pdev->target_tx_credit);
- qdf_atomic_init(&pdev->orig_target_tx_credit);
/*
* LL - initialize the target credit outselves.
- * HL - wait for a HTT target credit initialization during htt_attach.
+ * HL - wait for a HTT target credit initialization
+ * during htt_attach.
*/
+ if (pdev->cfg.is_high_latency) {
+ desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
- qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
- &pdev->target_tx_credit);
+ qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
+ qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
- desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
+ pdev->tx_queue.rsrc_threshold_lo =
+ ol_txrx_rsrc_threshold_lo(desc_pool_size);
+ pdev->tx_queue.rsrc_threshold_hi =
+ ol_txrx_rsrc_threshold_hi(desc_pool_size);
+
+ for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
+ qdf_atomic_init(&pdev->txq_grps[i].credit);
+
+ ol_tx_target_credit_init(pdev, desc_pool_size);
+ } else {
+ qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
+ &pdev->target_tx_credit);
+ desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
+ }
ol_tx_desc_dup_detect_init(pdev, desc_pool_size);
@@ -644,6 +1201,7 @@
"%s:%d - %d FRAG VA 0x%p FRAG PA 0x%llx",
__func__, __LINE__, i,
c_element->tx_desc.htt_frag_desc,
+ (long long unsigned int)
c_element->tx_desc.htt_frag_desc_paddr);
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
c_element->tx_desc.pkt_type = 0xff;
@@ -884,6 +1442,9 @@
pdev->cfg.ll_pause_txq_limit =
ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
+ /* TX flow control for peer who is in very bad link status */
+ ol_tx_badpeer_flow_cl_init(pdev);
+
#ifdef QCA_COMPUTE_TX_DELAY
qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
qdf_spinlock_create(&pdev->tx_delay.mutex);
@@ -1016,6 +1577,10 @@
htt_pktlogmod_exit(pdev, osc);
OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
+
+ if (pdev->cfg.is_high_latency)
+ ol_tx_sched_detach(pdev);
+
#ifdef QCA_SUPPORT_TX_THROTTLE
/* Thermal Mitigation */
qdf_timer_stop(&pdev->tx_throttle.phase_timer);
@@ -1094,10 +1659,17 @@
/* Thermal Mitigation */
qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
#endif
+
+ /* TX flow control for peer who is in very bad link status */
+ ol_tx_badpeer_flow_cl_deinit(pdev);
+
OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
OL_RX_REORDER_TRACE_DETACH(pdev);
OL_RX_PN_TRACE_DETACH(pdev);
+
+ ol_txrx_pdev_txq_log_destroy(pdev);
+ ol_txrx_pdev_grp_stat_destroy(pdev);
/*
* WDI event detach
*/
@@ -1109,6 +1681,28 @@
#endif
}
+#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
+
+/**
+ * ol_txrx_vdev_tx_desc_cnt_init() - initialise tx descriptor count for vdev
+ * @vdev: the virtual device object
+ *
+ * Return: None
+ */
+static inline void
+ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
+{
+ qdf_atomic_init(&vdev->tx_desc_count);
+}
+#else
+
+static inline void
+ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
+{
+ return;
+}
+#endif
+
/**
* ol_txrx_vdev_attach - Allocate and initialize the data object
* for a new virtual device.
@@ -1148,17 +1742,23 @@
vdev->fwd_tx_packets = 0;
vdev->fwd_rx_packets = 0;
+ ol_txrx_vdev_tx_desc_cnt_init(vdev);
+
qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
OL_TXRX_MAC_ADDR_LEN);
TAILQ_INIT(&vdev->peer_list);
vdev->last_real_peer = NULL;
+ ol_txrx_hl_tdls_flag_reset(vdev, false);
+
#ifdef QCA_IBSS_SUPPORT
vdev->ibss_peer_num = 0;
vdev->ibss_peer_heart_beat_timer = 0;
#endif
+ ol_txrx_vdev_txqs_init(vdev);
+
qdf_spinlock_create(&vdev->ll_pause.mutex);
vdev->ll_pause.paused_reason = 0;
vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
@@ -1304,6 +1904,8 @@
/* preconditions */
TXRX_ASSERT2(vdev);
+ ol_txrx_vdev_tx_queue_free(vdev);
+
qdf_spin_lock_bh(&vdev->ll_pause.mutex);
qdf_timer_stop(&vdev->ll_pause.timer);
qdf_timer_free(&vdev->ll_pause.timer);
@@ -1337,8 +1939,7 @@
if (!TAILQ_EMPTY(&vdev->peer_list)) {
/* debug print - will be removed later */
TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
- "%s: not deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x)"
- "until deletion finishes for all its peers\n",
+ "%s: not deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
__func__, vdev,
vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
@@ -1379,7 +1980,7 @@
* Return: None
*/
void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
- bool drop)
+ bool drop)
{
struct ol_rx_cached_buf *cache_buf;
QDF_STATUS ret;
@@ -1510,6 +2111,8 @@
qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
OL_TXRX_MAC_ADDR_LEN);
+ ol_txrx_peer_txqs_init(pdev, peer);
+
INIT_LIST_HEAD(&peer->cached_bufq);
qdf_spin_lock_bh(&pdev->peer_ref_mutex);
/* add this peer into the vdev's list */
@@ -1920,10 +2523,11 @@
peer = ol_txrx_peer_find_hash_find(pdev, peer_mac, 0, 1);
if (NULL == peer) {
- TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: peer is null for peer_mac"
- " 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __FUNCTION__,
- peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
- peer_mac[4], peer_mac[5]);
+ TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
+ "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ __func__,
+ peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
+ peer_mac[4], peer_mac[5]);
return QDF_STATUS_E_INVAL;
}
@@ -2213,6 +2817,8 @@
qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
}
+ ol_txrx_peer_tx_queue_free(pdev, peer);
+
/*
* 'array' is allocated in addba handler and is supposed to be
* freed in delba handler. There is the case (for example, in
@@ -2375,11 +2981,14 @@
struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
uint32_t total;
- total = ol_tx_get_desc_global_pool_size(pdev);
+ if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
+ total = qdf_atomic_read(&pdev->orig_target_tx_credit);
+ else
+ total = ol_tx_get_desc_global_pool_size(pdev);
TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
- "total tx credit %d num_free %d",
- total, pdev->tx_desc.num_free);
+ "total tx credit %d num_free %d",
+ total, pdev->tx_desc.num_free);
return;
}
@@ -2410,7 +3019,7 @@
qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
if (timeout <= 0) {
TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
- "%s: tx frames are pending", __func__);
+ "%s: tx frames are pending", __func__);
ol_txrx_dump_tx_desc(txrx_pdev);
return QDF_STATUS_E_TIMEOUT;
}
@@ -2495,7 +3104,10 @@
struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
uint32_t total;
- total = ol_tx_get_desc_global_pool_size(pdev);
+ if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
+ total = qdf_atomic_read(&pdev->orig_target_tx_credit);
+ else
+ total = ol_tx_get_desc_global_pool_size(pdev);
return total - ol_tx_get_total_free_desc(pdev);
}
@@ -2809,8 +3421,7 @@
ol_txrx_pdev_display(vdev->pdev, 0);
#else
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
- "The pdev,vdev,peer display functions are disabled.\n"
- "To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
+ "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
#endif
}
if (debug_specs & TXRX_DBG_MASK_STATS) {
@@ -2821,9 +3432,7 @@
ol_txrx_prot_ans_display(vdev->pdev);
#else
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
- "txrx protocol analysis is disabled.\n"
- "To enable it, recompile with "
- "ENABLE_TXRX_PROT_ANALYZE defined");
+ "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
#endif
}
if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
@@ -2831,9 +3440,7 @@
ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
#else
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
- "rx reorder seq num trace is disabled.\n"
- "To enable it, recompile with "
- "ENABLE_RX_REORDER_TRACE defined");
+ "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
#endif
}
@@ -2860,7 +3467,7 @@
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
"%*svdev list:", indent + 4, " ");
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
- ol_txrx_vdev_display(vdev, indent + 8);
+ ol_txrx_vdev_display(vdev, indent + 8);
}
ol_txrx_peer_find_display(pdev, indent + 4);
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
@@ -2887,7 +3494,7 @@
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
"%*speer list:", indent + 4, " ");
TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
- ol_txrx_peer_display(peer, indent + 8);
+ ol_txrx_peer_display(peer, indent + 8);
}
}
@@ -2976,23 +3583,20 @@
ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
if (!vdev) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: vdev is NULL", __func__);
+ "%s: vdev is NULL", __func__);
snprintf(buffer, buf_len, "vdev not found");
return len;
}
len = scnprintf(buffer, buf_len,
- "\nTXRX stats:\n"
- "\nllQueue State : %s"
- "\n pause %u unpause %u"
- "\n overflow %u"
- "\nllQueue timer state : %s\n",
- ((vdev->ll_pause.is_q_paused == false) ? "UNPAUSED" : "PAUSED"),
- vdev->ll_pause.q_pause_cnt,
- vdev->ll_pause.q_unpause_cnt,
- vdev->ll_pause.q_overflow_cnt,
- ((vdev->ll_pause.is_q_timer_on == false)
- ? "NOT-RUNNING" : "RUNNING"));
+ "\nTXRX stats:\n\nllQueue State : %s\n pause %u unpause %u\n overflow %u\n llQueue timer state : %s\n",
+ ((vdev->ll_pause.is_q_paused == false) ?
+ "UNPAUSED" : "PAUSED"),
+ vdev->ll_pause.q_pause_cnt,
+ vdev->ll_pause.q_unpause_cnt,
+ vdev->ll_pause.q_overflow_cnt,
+ ((vdev->ll_pause.is_q_timer_on == false)
+ ? "NOT-RUNNING" : "RUNNING"));
return len;
}
@@ -3143,14 +3747,14 @@
if (sta_id >= WLAN_MAX_STA_COUNT) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "Invalid sta id passed");
+ "Invalid sta id passed");
return NULL;
}
pdev = cds_get_context(QDF_MODULE_ID_TXRX);
if (!pdev) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "PDEV not found for sta_id [%d]", sta_id);
+ "PDEV not found for sta_id [%d]", sta_id);
return NULL;
}
@@ -3158,7 +3762,7 @@
if (!peer) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "PEER [%d] not found", sta_id);
+ "PEER [%d] not found", sta_id);
return NULL;
}
@@ -3174,13 +3778,13 @@
* Return: 0 for sucess or error code
*/
int ol_txrx_register_tx_flow_control (uint8_t vdev_id,
- ol_txrx_tx_flow_control_fp flowControl,
- void *osif_fc_ctx)
+ ol_txrx_tx_flow_control_fp flowControl,
+ void *osif_fc_ctx)
{
ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
if (NULL == vdev) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: Invalid vdev_id %d", __func__, vdev_id);
+ "%s: Invalid vdev_id %d", __func__, vdev_id);
return -EINVAL;
}
@@ -3202,7 +3806,7 @@
ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
if (NULL == vdev) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: Invalid vdev_id", __func__);
+ "%s: Invalid vdev_id", __func__);
return -EINVAL;
}
@@ -3229,7 +3833,7 @@
ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
if (NULL == vdev) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: Invalid sta_id %d", __func__, sta_id);
+ "%s: Invalid sta_id %d", __func__, sta_id);
		/* Return true so the caller does not conclude that the
		 * resource count is below low_watermark.
		 * sta_id validation will be done in ol_tx_send_data_frame
@@ -3240,6 +3844,7 @@
}
qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
+
if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
vdev->tx_fl_lwm = (uint16_t) low_watermark;
vdev->tx_fl_hwm =
@@ -3266,7 +3871,7 @@
ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
if (NULL == vdev) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: Invalid vdev_id %d", __func__, vdev_id);
+ "%s: Invalid vdev_id %d", __func__, vdev_id);
return -EINVAL;
}
@@ -3285,7 +3890,7 @@
* Return: none
*/
inline void ol_txrx_flow_control_cb(ol_txrx_vdev_handle vdev,
- bool tx_resume)
+ bool tx_resume)
{
qdf_spin_lock_bh(&vdev->flow_control_lock);
if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
@@ -3429,7 +4034,7 @@
pkt = cds_alloc_ol_rx_pkt(sched_ctx);
if (qdf_unlikely(!pkt)) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: Not able to allocate context", __func__);
+ "%s: Not able to allocate context", __func__);
return;
}
@@ -3488,7 +4093,7 @@
pdev = cds_get_context(QDF_MODULE_ID_TXRX);
if (!pdev) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: pdev is NULL", __func__);
+ "%s: pdev is NULL", __func__);
return;
}
@@ -3505,6 +4110,26 @@
case WLAN_TXRX_DESC_STATS:
qdf_nbuf_tx_desc_count_display();
break;
+#ifdef CONFIG_HL_SUPPORT
+ case WLAN_SCHEDULER_STATS:
+ ol_tx_sched_cur_state_display(pdev);
+ ol_tx_sched_stats_display(pdev);
+ break;
+ case WLAN_TX_QUEUE_STATS:
+ ol_tx_queue_log_display(pdev);
+ break;
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+ case WLAN_CREDIT_STATS:
+ ol_tx_dump_group_credit_stats(pdev);
+ break;
+#endif
+
+#ifdef DEBUG_HL_LOGGING
+ case WLAN_BUNDLE_STATS:
+ htt_dump_bundle_stats(pdev->htt_pdev);
+ break;
+#endif
+#endif
default:
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: Unknown value", __func__);
@@ -3519,7 +4144,7 @@
pdev = cds_get_context(QDF_MODULE_ID_TXRX);
if (!pdev) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: pdev is NULL", __func__);
+ "%s: pdev is NULL", __func__);
return;
}
@@ -3533,6 +4158,22 @@
case WLAN_TXRX_DESC_STATS:
qdf_nbuf_tx_desc_count_clear();
break;
+#ifdef CONFIG_HL_SUPPORT
+ case WLAN_SCHEDULER_STATS:
+ ol_tx_sched_stats_clear(pdev);
+ break;
+ case WLAN_TX_QUEUE_STATS:
+ ol_tx_queue_log_clear(pdev);
+ break;
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+ case WLAN_CREDIT_STATS:
+ ol_tx_clear_group_credit_stats(pdev);
+ break;
+#endif
+ case WLAN_BUNDLE_STATS:
+ htt_clear_bundle_stats(pdev->htt_pdev);
+ break;
+#endif
default:
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: Unknown value", __func__);
@@ -3692,7 +4333,7 @@
pkt = cds_alloc_ol_rx_pkt(sched_ctx);
if (!pkt) {
TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
- "No available Rx message buffer");
+ "No available Rx message buffer");
goto drop_rx_buf;
}
pkt->callback = (cds_ol_rx_thread_cb)
@@ -3889,14 +4530,14 @@
* Return: none
*/
void ol_txrx_lro_flush_handler(void *context,
- void *rxpkt,
- uint16_t staid)
+ void *rxpkt,
+ uint16_t staid)
{
ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)context;
if (qdf_unlikely(!pdev)) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: Invalid context", __func__);
+ "%s: Invalid context", __func__);
qdf_assert(0);
return;
}
@@ -3905,7 +4546,7 @@
pdev->lro_info.lro_flush_cb(pdev->lro_info.lro_data);
else
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: lro_flush_cb NULL", __func__);
+ "%s: lro_flush_cb NULL", __func__);
}
/**
@@ -3932,7 +4573,7 @@
pkt = cds_alloc_ol_rx_pkt(sched_ctx);
if (qdf_unlikely(!pkt)) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
- "%s: Not able to allocate context", __func__);
+ "%s: Not able to allocate context", __func__);
return;
}
diff --git a/core/dp/txrx/ol_txrx.h b/core/dp/txrx/ol_txrx.h
index 791a0ec..e02647e 100644
--- a/core/dp/txrx/ol_txrx.h
+++ b/core/dp/txrx/ol_txrx.h
@@ -34,10 +34,100 @@
void ol_txrx_peer_unref_delete(struct ol_txrx_peer_t *peer);
+/**
+ * ol_tx_desc_pool_size_hl() - compute the tx descriptor pool size for HL systems
+ * @ctrl_pdev: the control pdev handle
+ *
+ * Return: the computed pool size
+ */
+u_int16_t
+ol_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev);
+
#ifndef OL_TX_AVG_FRM_BYTES
#define OL_TX_AVG_FRM_BYTES 1000
#endif
+#ifndef OL_TX_DESC_POOL_SIZE_MIN_HL
+#define OL_TX_DESC_POOL_SIZE_MIN_HL 500
+#endif
+
+#ifndef OL_TX_DESC_POOL_SIZE_MAX_HL
+#define OL_TX_DESC_POOL_SIZE_MAX_HL 5000
+#endif
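
The MIN/MAX bounds above suggest the pool-size helper clamps a
configuration-derived frame count. A minimal sketch, assuming a
hypothetical example_cfg_target_tx_frms() supplies the target's configured
frame count:

	static u_int16_t
	example_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev)
	{
		u_int16_t pool_size = example_cfg_target_tx_frms(ctrl_pdev);

		/* clamp into [OL_TX_DESC_POOL_SIZE_MIN_HL, ..._MAX_HL] */
		if (pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
			pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;
		else if (pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
			pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;
		return pool_size;
	}
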
+
+
+#ifdef CONFIG_PER_VDEV_TX_DESC_POOL
+#define TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK 400
+#define TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED 100
+#endif
+
+#ifdef CONFIG_TX_DESC_HI_PRIO_RESERVE
+#define TXRX_HL_TX_DESC_HI_PRIO_RESERVED 20
+#endif
+
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+
+void
+ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag);
+#else
+
+static inline void
+ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag)
+{
+ return;
+}
+#endif
+
+#ifdef CONFIG_HL_SUPPORT
+
+void
+ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr);
+
+void
+ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
+ ol_txrx_vdev_handle vdev,
+ uint8_t *peer_id);
+
+bool
+is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer);
+
+void
+ol_txrx_update_last_real_peer(
+ ol_txrx_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ uint8_t *peer_id, bool restore_last_peer);
+#else
+
+static inline void
+ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr)
+{
+ return;
+}
+
+static inline void
+ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
+ ol_txrx_vdev_handle vdev, uint8_t *peer_id)
+{
+ return;
+}
+
+static inline bool
+is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer)
+{
+ return false;
+}
+
+static inline void
+ol_txrx_update_last_real_peer(
+ ol_txrx_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ uint8_t *peer_id, bool restore_last_peer)
+{
+ return;
+}
+#endif
+
ol_txrx_vdev_handle ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);
void htt_pkt_log_init(struct ol_txrx_pdev_t *handle, void *scn);
diff --git a/core/dp/txrx/ol_txrx_internal.h b/core/dp/txrx/ol_txrx_internal.h
index 25cf2a0..e846322 100644
--- a/core/dp/txrx/ol_txrx_internal.h
+++ b/core/dp/txrx/ol_txrx_internal.h
@@ -559,6 +559,36 @@
is_mcast); \
} while (false)
+#ifdef CONFIG_HL_SUPPORT
+
+ /**
+	 * ol_rx_err_inv_get_wifi_header() - retrieve the wifi header
+	 * @pdev: handle to the physical device
+	 * @rx_msdu: msdu from which the header is to be retrieved
+	 *
+	 * Return: pointer to the wifi header, or NULL if it is unavailable
+ */
+ static inline
+ struct ieee80211_frame *ol_rx_err_inv_get_wifi_header(
+ struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu)
+ {
+ return NULL;
+ }
+#else
+
+ static inline
+ struct ieee80211_frame *ol_rx_err_inv_get_wifi_header(
+ struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu)
+ {
+ struct ieee80211_frame *wh = NULL;
+ if (ol_cfg_frame_type(pdev) == wlan_frm_fmt_native_wifi)
+		/* For Windows, it is always a native wifi header. */
+ wh = (struct ieee80211_frame *)qdf_nbuf_data(rx_msdu);
+
+ return wh;
+ }
+#endif
+
#define OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu) \
do { \
struct ieee80211_frame *wh = NULL; \
@@ -568,11 +598,7 @@
/*wh = (struct ieee80211_frame *) */ \
/*htt_rx_mpdu_wifi_hdr_retrieve(pdev->htt_pdev, rx_desc);*/ \
		/* this only applies to LL devices. */ \
- if (ol_cfg_frame_type(pdev->ctrl_pdev) == \
- wlan_frm_fmt_native_wifi) { \
- /* For windows, it is always native wifi header .*/ \
- wh = (struct ieee80211_frame *)qdf_nbuf_data(rx_msdu); \
- } \
+ wh = ol_rx_err_inv_get_wifi_header(pdev->ctrl_pdev, rx_msdu); \
ol_rx_err_inv_peer_statistics(pdev->ctrl_pdev, \
wh, OL_RX_ERR_UNKNOWN_PEER); \
} while (false)
@@ -734,4 +760,13 @@
#endif /* FEATURE_TSO_DEBUG */
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+void
+ol_txrx_update_group_credit(
+ struct ol_tx_queue_group_t *group,
+ int32_t credit,
+ u_int8_t absolute);
+#endif
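
A usage sketch for the declaration above, assuming the "absolute" flag
selects set-versus-add semantics on the group's atomic credit counter
(grp is a struct ol_tx_queue_group_t *):

	ol_txrx_update_group_credit(grp, 32, 1);  /* set credit to 32 */
	ol_txrx_update_group_credit(grp, -4, 0);  /* consume 4 credits */
	ol_txrx_update_group_credit(grp, 8, 0);   /* replenish 8 credits */
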
+
#endif /* _OL_TXRX_INTERNAL__H_ */
diff --git a/core/dp/txrx/ol_txrx_peer_find.c b/core/dp/txrx/ol_txrx_peer_find.c
index 072d2eb..6f13af5 100644
--- a/core/dp/txrx/ol_txrx_peer_find.c
+++ b/core/dp/txrx/ol_txrx_peer_find.c
@@ -398,17 +398,85 @@
/*=== function definitions for message handling =============================*/
+#if defined(CONFIG_HL_SUPPORT)
+
void
ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
uint16_t peer_id,
uint8_t vdev_id, uint8_t *peer_mac_addr, int tx_ready)
{
ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
+	if (!tx_ready) {
+		struct ol_txrx_peer_t *peer;
+
+		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+		if (!peer) {
+			/* ol_txrx_peer_detach called before peer map arrived */
+			return;
+		}
+		/* walk through the paused mgmt queue, update tx descriptors */
+		ol_tx_queue_decs_reinit(peer, peer_id);
+
+		/*
+		 * Keep the non-mgmt tx queues paused until assoc completes;
+		 * they were paused in ol_txrx_peer_attach.
+		 * Unpause only the tx mgmt queue.
+		 */
+		ol_txrx_peer_tid_unpause(peer, HTT_TX_EXT_TID_MGMT);
+	}
}
void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
+ struct ol_txrx_peer_t *peer;
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+ if (peer) {
+ int i;
+ /*
+ * Unpause all data tx queues now that the target is ready.
+ * The mgmt tx queue was not paused, so skip it.
+ */
+ for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
+ if (i == HTT_TX_EXT_TID_MGMT)
+ continue; /* mgmt tx queue was not paused */
+
+ ol_txrx_peer_tid_unpause(peer, i);
+ }
+ }
}
+#else
+
+void
+ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
+ uint16_t peer_id,
+ uint8_t vdev_id,
+ uint8_t *peer_mac_addr,
+ int tx_ready)
+{
+ ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
+}
+
+void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
+{
+ return;
+}
+
+#endif
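
Read together, the two HL handlers above give the bring-up sequence for a
new peer (the LL stubs reduce to a bare peer-ID registration, since LL tx
flow control is handled entirely in the target FW):

	1. ol_txrx_peer_attach       - the peer's tx queues start out paused
	2. peer map (tx_ready == 0)  - mgmt-queue tx descriptors are
	                               reinitialized and HTT_TX_EXT_TID_MGMT
	                               is unpaused, so association frames can
	                               be sent while data queues stay held
	3. tx-ready event            - the remaining data TIDs are unpaused
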
+
void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
diff --git a/core/dp/txrx/ol_txrx_types.h b/core/dp/txrx/ol_txrx_types.h
index 0427152..25cdcff 100644
--- a/core/dp/txrx/ol_txrx_types.h
+++ b/core/dp/txrx/ol_txrx_types.h
@@ -123,6 +123,42 @@
OL_TX_FRM_NO_FREE, /* frame requires special tx completion callback */
};
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+
+#define MAX_NO_PEERS_IN_LIMIT (2*10 + 2)
+
+enum ol_tx_peer_bal_state {
+ ol_tx_peer_bal_enable = 0,
+ ol_tx_peer_bal_disable,
+};
+
+enum ol_tx_peer_bal_timer_state {
+ ol_tx_peer_bal_timer_disable = 0,
+ ol_tx_peer_bal_timer_active,
+ ol_tx_peer_bal_timer_inactive,
+};
+
+struct ol_tx_limit_peer_t {
+ u_int16_t limit_flag;
+ u_int16_t peer_id;
+ u_int16_t limit;
+};
+
+enum tx_peer_level {
+ TXRX_IEEE11_B = 0,
+ TXRX_IEEE11_A_G,
+ TXRX_IEEE11_N,
+ TXRX_IEEE11_AC,
+ TXRX_IEEE11_MAX,
+};
+
+struct tx_peer_threshold {
+ u_int32_t tput_thresh;
+ u_int32_t tx_limit;
+};
+#endif
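
The tx_peer_threshold table above pairs a throughput threshold with a tx
limit per PHY level. A minimal sketch of how a limit might be chosen for a
peer's measured throughput (hypothetical helper, not the driver's):

	static u_int32_t
	example_peer_tx_limit(struct tx_peer_threshold *ctl_thresh,
			      u_int32_t peer_tput)
	{
		int level;

		/* walk from the highest PHY level down to 11b */
		for (level = TXRX_IEEE11_MAX - 1; level > 0; level--) {
			if (peer_tput >= ctl_thresh[level].tput_thresh)
				return ctl_thresh[level].tx_limit;
		}
		return ctl_thresh[TXRX_IEEE11_B].tx_limit;
	}
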
+
+
struct ol_tx_desc_t {
qdf_nbuf_t netbuf;
void *htt_tx_desc;
@@ -151,11 +187,17 @@
* This field is filled in with the ol_tx_frm_type enum.
*/
uint8_t pkt_type;
+#if defined(CONFIG_HL_SUPPORT)
+ struct ol_txrx_vdev_t *vdev;
+#endif
+ void *txq;
+
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
/* used by tx encap, to restore the os buf start offset
after tx complete */
uint8_t orig_l2_hdr_bytes;
#endif
+
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
struct ol_tx_flow_pool_t *pool;
#endif
@@ -197,6 +239,19 @@
(((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
TXRX_WMM_AC_BE)
+enum {
+ OL_TX_SCHED_WRR_ADV_CAT_BE,
+ OL_TX_SCHED_WRR_ADV_CAT_BK,
+ OL_TX_SCHED_WRR_ADV_CAT_VI,
+ OL_TX_SCHED_WRR_ADV_CAT_VO,
+ OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA,
+ OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT,
+ OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA,
+ OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT,
+
+ OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES /* must be last */
+};
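
The category order above (per-AC data, non-QoS data, then mgmt and mcast
classes) implies a classification along these lines. This is a sketch
only; the real mapping lives in the tx classify and scheduler modules:

	static int
	example_wrr_adv_category(int is_mgmt, int is_mcast, int is_qos, int ac)
	{
		if (is_mgmt)
			return is_mcast ? OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT :
					  OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
		if (is_mcast)
			return OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
		if (!is_qos)
			return OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;

		switch (ac) {
		case TXRX_WMM_AC_BK:
			return OL_TX_SCHED_WRR_ADV_CAT_BK;
		case TXRX_WMM_AC_VI:
			return OL_TX_SCHED_WRR_ADV_CAT_VI;
		case TXRX_WMM_AC_VO:
			return OL_TX_SCHED_WRR_ADV_CAT_VO;
		default:
			return OL_TX_SCHED_WRR_ADV_CAT_BE;
		}
	}
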
+
struct ol_tx_reorder_cat_timeout_t {
TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
qdf_timer_t timer;
@@ -204,6 +259,11 @@
struct ol_txrx_pdev_t *pdev;
};
+enum ol_tx_scheduler_status {
+ ol_tx_scheduler_idle = 0,
+ ol_tx_scheduler_running,
+};
+
enum ol_tx_queue_status {
ol_tx_queue_empty = 0,
ol_tx_queue_active,
@@ -224,6 +284,19 @@
ol_tx_aggr_in_progress,
};
+#define OL_TX_MAX_GROUPS_PER_QUEUE 1
+#define OL_TX_MAX_VDEV_ID 16
+#define OL_TXQ_GROUP_VDEV_ID_MASK_GET(_membership) \
+ (((_membership) & 0xffff0000) >> 16)
+#define OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(_mask, _vdev_id) \
+ ((_mask >> _vdev_id) & 0x01)
+#define OL_TXQ_GROUP_AC_MASK_GET(_membership) \
+ ((_membership) & 0x0000ffff)
+#define OL_TXQ_GROUP_AC_BIT_MASK_GET(_mask, _ac_mask) \
+ ((_mask >> _ac_mask) & 0x01)
+#define OL_TXQ_GROUP_MEMBERSHIP_GET(_vdev_mask, _ac_mask) \
+ ((_vdev_mask << 16) | _ac_mask)
+
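
An illustrative round trip through these macros (vdev-ID bits occupy the
upper 16 bits of the membership word, the AC mask the lower 16):

	u_int32_t membership = OL_TXQ_GROUP_MEMBERSHIP_GET(1 << 3, 0xf);
	/* membership == 0x0008000f */
	u_int16_t vdev_mask = OL_TXQ_GROUP_VDEV_ID_MASK_GET(membership);
	/* vdev_mask == 0x0008: vdev 3 is a member */
	u_int16_t ac_mask = OL_TXQ_GROUP_AC_MASK_GET(membership);
	/* ac_mask == 0x000f: all four ACs are members */
	int vdev3_in = OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_mask, 3);
	/* vdev3_in == 1 */
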
struct ol_tx_frms_queue_t {
/* list_elem -
* Allow individual tx frame queues to be linked together into
@@ -241,6 +314,10 @@
uint32_t bytes;
ol_tx_desc_list head;
enum ol_tx_queue_status flag;
+ struct ol_tx_queue_group_t *group_ptrs[OL_TX_MAX_GROUPS_PER_QUEUE];
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+ struct ol_txrx_peer_t *peer;
+#endif
};
enum {
@@ -272,6 +349,9 @@
uint8_t mac_addr[OL_TXRX_MAC_ADDR_LEN];
};
+struct ol_tx_sched_t;
+
+
#ifndef OL_TXRX_NUM_LOCAL_PEER_IDS
#define OL_TXRX_NUM_LOCAL_PEER_IDS 33 /* default */
#endif
@@ -322,6 +402,24 @@
typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt);
+struct ol_tx_queue_group_t {
+ qdf_atomic_t credit;
+ u_int32_t membership;
+};
+#define OL_TX_MAX_TXQ_GROUPS 2
+
+#define OL_TX_GROUP_STATS_LOG_SIZE 128
+struct ol_tx_group_credit_stats_t {
+ struct {
+ struct {
+ u_int16_t member_vdevs;
+ u_int16_t credit;
+ } grp[OL_TX_MAX_TXQ_GROUPS];
+ } stats[OL_TX_GROUP_STATS_LOG_SIZE];
+ u_int16_t last_valid_index;
+ u_int16_t wrap_around;
+};
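
The pdev's grp_stats (declared further below) uses this struct as a
128-entry ring. A sketch of appending one snapshot, with wrap-around
tracked via last_valid_index/wrap_around (hypothetical helper, not the
driver's):

	static void
	example_log_group_credits(struct ol_tx_group_credit_stats_t *log,
				  struct ol_tx_queue_group_t *grps)
	{
		int i;
		u_int16_t idx = log->last_valid_index + 1;

		if (idx >= OL_TX_GROUP_STATS_LOG_SIZE) {
			idx = 0;
			log->wrap_around = 1;
		}
		for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
			log->stats[idx].grp[i].member_vdevs =
				OL_TXQ_GROUP_VDEV_ID_MASK_GET(
					grps[i].membership);
			log->stats[idx].grp[i].credit =
				qdf_atomic_read(&grps[i].credit);
		}
		log->last_valid_index = idx;
	}
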
+
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
@@ -558,6 +656,7 @@
struct {
uint16_t pool_size;
uint16_t num_free;
+ union ol_tx_desc_list_elem_t *array;
union ol_tx_desc_list_elem_t *freelist;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
uint8_t num_invalid_bin;
@@ -677,6 +776,24 @@
#endif
/*
+	 * tx_sched only applies for HL, but is defined unconditionally
+	 * rather than only if defined(CONFIG_HL_SUPPORT), because the
+	 * struct occupies only a few bytes, and this avoids wrapping every
+	 * reference to its members in "defined(CONFIG_HL_SUPPORT)"
+	 * conditional compilation.
+	 * If this struct gets expanded to a non-trivial size, then it
+	 * should be conditionally compiled to only apply if
+	 * defined(CONFIG_HL_SUPPORT).
+	 */
+ qdf_spinlock_t tx_queue_spinlock;
+ struct {
+ enum ol_tx_scheduler_status tx_sched_status;
+ struct ol_tx_sched_t *scheduler;
+ struct ol_tx_frms_queue_t *last_used_txq;
+ } tx_sched;
+ /*
* tx_queue only applies for HL, but is defined unconditionally to avoid
* wrapping references to tx_queue in "defined(CONFIG_HL_SUPPORT)"
* conditional compilation.
@@ -690,6 +807,20 @@
uint16_t rsrc_threshold_hi;
} tx_queue;
+#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
+#define OL_TXQ_LOG_SIZE 512
+ qdf_spinlock_t txq_log_spinlock;
+ struct {
+ int size;
+ int oldest_record_offset;
+ int offset;
+ int allow_wrap;
+ u_int32_t wrapped;
+ /* aligned to u_int32_t boundary */
+ u_int8_t data[OL_TXQ_LOG_SIZE];
+ } txq_log;
+#endif
+
#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
qdf_spinlock_t peer_stat_mutex;
#endif
@@ -767,6 +898,35 @@
OL_TX_MUTEX_TYPE tso_mutex;
} tso_seg_pool;
#endif
+
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+ struct {
+ enum ol_tx_peer_bal_state enabled;
+ qdf_spinlock_t mutex;
+ /* timer used to trigger more frames for bad peers */
+ qdf_timer_t peer_bal_timer;
+		/* peer balance timer period, in ms */
+		u_int32_t peer_bal_period_ms;
+		/* per-peer txq limit */
+		u_int32_t peer_bal_txq_limit;
+		/* state of the peer balance timer */
+		enum ol_tx_peer_bal_timer_state peer_bal_timer_state;
+		/* number of active peers under tx flow control */
+		u_int32_t peer_num;
+		/* list of peers under tx flow control */
+		struct ol_tx_limit_peer_t limit_list[MAX_NO_PEERS_IN_LIMIT];
+		/* per-PHY-level threshold configuration */
+		struct tx_peer_threshold ctl_thresh[TXRX_IEEE11_MAX];
+	} tx_peer_bal;
+#endif /* CONFIG_HL_SUPPORT && QCA_BAD_PEER_TX_FLOW_CL */
+
+ struct ol_tx_queue_group_t txq_grps[OL_TX_MAX_TXQ_GROUPS];
+#ifdef DEBUG_HL_LOGGING
+ qdf_spinlock_t grp_stat_spinlock;
+ struct ol_tx_group_credit_stats_t grp_stats;
+#endif
+ int tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES];
uint8_t ocb_peer_valid;
struct ol_txrx_peer_t *ocb_peer;
ol_tx_pause_callback_fp pause_cb;
@@ -831,6 +991,10 @@
int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
#endif
+#if defined(CONFIG_HL_SUPPORT)
+ struct ol_tx_frms_queue_t txqs[OL_TX_VDEV_NUM_QUEUES];
+#endif
+
struct {
struct {
qdf_nbuf_t head;
@@ -854,6 +1018,16 @@
qdf_spinlock_t flow_control_lock;
ol_txrx_tx_flow_control_fp osif_flow_control_cb;
void *osif_fc_ctx;
+
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+ union ol_txrx_align_mac_addr_t hl_tdls_ap_mac_addr;
+ bool hlTdlsFlag;
+#endif
+
+#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
+ qdf_atomic_t tx_desc_count;
+#endif
+
uint16_t wait_on_peer_id;
qdf_event_t wait_delete_comp;
#if defined(FEATURE_TSO)
@@ -917,6 +1091,10 @@
typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
tx_msdu_info);
+#define OL_TXRX_PEER_SECURITY_MULTICAST 0
+#define OL_TXRX_PEER_SECURITY_UNICAST 1
+#define OL_TXRX_PEER_SECURITY_MAX 2
+
struct ol_txrx_peer_t {
struct ol_txrx_vdev_t *vdev;
@@ -978,6 +1156,10 @@
struct ol_txrx_peer_t *peer,
unsigned tid, qdf_nbuf_t msdu_list);
+#if defined(CONFIG_HL_SUPPORT)
+ struct ol_tx_frms_queue_t txqs[OL_TX_NUM_TIDS];
+#endif
+
#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
ol_txrx_peer_stats_t stats;
#endif
@@ -1013,6 +1195,11 @@
uint32_t last_pkt_tsf;
uint8_t last_pkt_tid;
uint16_t last_pkt_center_freq;
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+ u_int16_t tx_limit;
+ u_int16_t tx_limit_flag;
+ u_int16_t tx_pause_flag;
+#endif
qdf_time_t last_assoc_rcvd;
qdf_time_t last_disassoc_rcvd;
qdf_time_t last_deauth_rcvd;
diff --git a/target/inc/wlan_tgt_def_config_hl.h b/target/inc/wlan_tgt_def_config_hl.h
index 00cd9bf..92e8106 100644
--- a/target/inc/wlan_tgt_def_config_hl.h
+++ b/target/inc/wlan_tgt_def_config_hl.h
@@ -1,9 +1,6 @@
/*
* Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
*
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all