/*
 * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_atomic.h>		/* qdf_atomic_inc, etc. */
#include <qdf_lock.h>		/* qdf_os_spinlock */
#include <qdf_time.h>		/* qdf_system_ticks, etc. */
#include <qdf_nbuf.h>		/* qdf_nbuf_t */
#include <qdf_net_types.h>	/* QDF_NBUF_TX_EXT_TID_INVALID */

#include "queue.h"		/* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <enet.h>		/* ethernet_hdr_t, etc. */
#include <ipv6_defs.h>		/* ipv6_traffic_class */
#endif

#include <ol_txrx_api.h>	/* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h>	/* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h>	/* htt_tx_status */

#include <ol_ctrl_txrx_api.h>
#include <cdp_txrx_tx_delay.h>
#include <ol_txrx_types.h>	/* ol_txrx_vdev_t, etc */
#include <ol_tx_desc.h>		/* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ol_tx_classify.h>	/* ol_tx_dest_addr_find */
#endif
#include <ol_txrx_internal.h>	/* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h>		/* ol_tx_reinject */
#include <ol_tx_send.h>

#include <ol_cfg.h>		/* ol_cfg_is_high_latency */
#include <ol_tx_sched.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>	/* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_tx_queue.h>
#include <ol_txrx.h>
#include <pktlog_ac_fmt.h>
#include <cdp_txrx_handle.h>

#ifdef TX_CREDIT_RECLAIM_SUPPORT

#define OL_TX_CREDIT_RECLAIM(pdev)					\
	do {								\
		if (qdf_atomic_read(&pdev->target_tx_credit)  <		\
		    ol_cfg_tx_credit_lwm(pdev->ctrl_pdev)) {		\
			ol_osif_ath_tasklet(pdev->osdev);		\
		}							\
	} while (0)

#else

#define OL_TX_CREDIT_RECLAIM(pdev)

#endif /* TX_CREDIT_RECLAIM_SUPPORT */
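
/*
 * Illustrative sketch (not driver code) of how the low-watermark reclaim
 * above behaves; the credit and watermark values are hypothetical:
 *
 *	credit = qdf_atomic_read(&pdev->target_tx_credit);   // e.g. 3
 *	lwm = ol_cfg_tx_credit_lwm(pdev->ctrl_pdev);          // e.g. 8
 *	if (credit < lwm)
 *		ol_osif_ath_tasklet(pdev->osdev); // poll the target for
 *						  // completions, which
 *						  // return credit
 *
 * With credit at or above the watermark the macro is a no-op, so the
 * host only polls when it risks running out of download credit.
 */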

#if defined(CONFIG_HL_SUPPORT) || defined(TX_CREDIT_RECLAIM_SUPPORT)

/*
 * HL needs to keep track of the amount of credit available to download
 * tx frames to the target - the download scheduler decides when to
 * download frames, and which frames to download, based on the credit
 * availability.
 * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
 * of the target_tx_credit, to determine when to poll for tx completion
 * messages.
 */
static inline void
ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
	qdf_atomic_add(-1 * delta, &pdev->target_tx_credit);
}

static inline void
ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
	qdf_atomic_add(delta, &pdev->target_tx_credit);
}
#else

static inline void
ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
}

static inline void
ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
}
#endif

#ifdef DESC_TIMESTAMP_DEBUG_INFO
static inline void ol_tx_desc_update_comp_ts(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->desc_debug_info.last_comp_ts = qdf_get_log_timestamp();
}
#else
static inline void ol_tx_desc_update_comp_ts(struct ol_tx_desc_t *tx_desc)
{
}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_HL_NETDEV_FLOW_CONTROL)
void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
	struct ol_txrx_vdev_t *vdev;
	bool trigger_unpause = false;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->tx_desc_limit == 0)
			continue;

		/* un-pause high priority queue */
		if (vdev->prio_q_paused &&
		    (qdf_atomic_read(&vdev->tx_desc_count)
		     < vdev->tx_desc_limit)) {
			pdev->pause_cb(vdev->vdev_id,
				       WLAN_NETIF_PRIORITY_QUEUE_ON,
				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
			vdev->prio_q_paused = 0;
		}
		/* un-pause non priority queues */
		if (qdf_atomic_read(&vdev->os_q_paused) &&
		    (qdf_atomic_read(&vdev->tx_desc_count)
		     <= vdev->queue_restart_th)) {
			pdev->pause_cb(vdev->vdev_id,
				       WLAN_WAKE_NON_PRIORITY_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			qdf_atomic_set(&vdev->os_q_paused, 0);
			trigger_unpause = true;
		}
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	if (trigger_unpause)
		ol_tx_hl_pdev_queue_send_all(pdev);
}
#endif
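
/*
 * Illustrative sketch (not driver code) of the hysteresis implemented
 * above; the threshold values here are hypothetical examples:
 *
 *	vdev->tx_desc_limit = 100;    // priority queue resumes below this
 *	vdev->queue_restart_th = 40;  // normal queues resume at/below this
 *
 *	// as completions free descriptors and tx_desc_count falls:
 *	//   99 < 100  -> WLAN_NETIF_PRIORITY_QUEUE_ON
 *	//   40 <= 40  -> WLAN_WAKE_NON_PRIORITY_QUEUE
 *
 * The restart threshold sits well below the stop level (applied on the
 * tx path, outside this file), so the OS queues do not flap on and off
 * around a single descriptor-count boundary.
 */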

static inline uint16_t
ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
		struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
{
	int msdu_credit_consumed;

	TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", qdf_nbuf_len(msdu));
	TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      qdf_atomic_read(&pdev->target_tx_credit) - 1,
			      qdf_nbuf_len(msdu));

	msdu_credit_consumed = htt_tx_msdu_credit(msdu);
	ol_tx_target_credit_decr_int(pdev, msdu_credit_consumed);
	OL_TX_CREDIT_RECLAIM(pdev);

	/*
	 * When the tx frame is downloaded to the target, there are two
	 * outstanding references:
	 * 1. The host download SW (HTT, HTC, HIF)
	 *    This reference is cleared by the ol_tx_send_done callback
	 *    functions.
	 * 2. The target FW
	 *    This reference is cleared by the ol_tx_completion_handler
	 *    function.
	 * It is extremely probable that the download completion is processed
	 * before the tx completion message.  However, under exceptional
	 * conditions the tx completion may be processed first.  Thus, rather
	 * than assuming that reference (1) is done before reference (2),
	 * explicit reference tracking is needed.
	 * Double-increment the ref count to account for both references
	 * described above.
	 */

	OL_TX_DESC_REF_INIT(tx_desc);
	OL_TX_DESC_REF_INC(tx_desc);
	OL_TX_DESC_REF_INC(tx_desc);

	return msdu_credit_consumed;
}
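
/*
 * Illustrative sketch (not driver code) of the reference lifecycle set
 * up above.  The ordering shown is the common case, but either
 * decrement may happen first:
 *
 *	ol_tx_send_base():           refs = 2  (download SW + target FW)
 *	download-done callback:      refs = 1
 *	tx completion indication:    refs = 0  -> OL_TX_DESC_NO_REFS()
 *	                                          is true, frame is freed
 *
 * Whichever path observes the count reaching zero performs the free,
 * so the descriptor is released exactly once regardless of ordering.
 */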

void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
	   struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu, uint8_t vdev_id)
{
	int msdu_credit_consumed;
	uint16_t id;
	int failed;

	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
	id = ol_tx_desc_id(pdev, tx_desc);
	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
	DPTRACE(qdf_dp_trace_ptr(msdu, QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(msdu),
				 sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
				 vdev_id));
	failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
	if (qdf_unlikely(failed)) {
		ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
	}
}

void
ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
		 qdf_nbuf_t head_msdu, int num_msdus)
{
	qdf_nbuf_t rejected;

	OL_TX_CREDIT_RECLAIM(pdev);

	rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
	while (qdf_unlikely(rejected)) {
		struct ol_tx_desc_t *tx_desc;
		uint16_t *msdu_id_storage;
		qdf_nbuf_t next;

		next = qdf_nbuf_next(rejected);
		msdu_id_storage = ol_tx_msdu_id_storage(rejected);
		tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);

		ol_tx_target_credit_incr(pdev, rejected);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);

		rejected = next;
	}
}

void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
		  struct ol_tx_desc_t *tx_desc,
		  qdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
{
	int msdu_credit_consumed;
	uint16_t id;
	int failed;

	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
	id = ol_tx_desc_id(pdev, tx_desc);
	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
	failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
	if (failed) {
		ol_txrx_err(
			"Error: freeing tx frame after htt_tx failed");
		ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
	}
}

static inline bool
ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
			 A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_tx_desc_t *tx_desc;
	bool is_frame_freed = false;

	tx_desc = ol_tx_desc_find(pdev, msdu_id);
	qdf_assert(tx_desc);

	/*
	 * If the download is done for a management frame,
	 * call the download callback if one is registered.
	 */
	if (tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
		ol_txrx_mgmt_tx_cb download_cb =
			pdev->tx_mgmt_cb.download_cb;
		if (download_cb) {
			download_cb(pdev->tx_mgmt_cb.ctxt,
				    tx_desc->netbuf, status != A_OK);
		}
	}

	if (status != A_OK) {
		ol_tx_target_credit_incr(pdev, msdu);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     1 /* download err */);
		is_frame_freed = true;
	} else {
		if (OL_TX_DESC_NO_REFS(tx_desc)) {
			/*
			 * The decremented value was zero - free the frame.
			 * Use the tx status recorded previously during
			 * tx completion handling.
			 */
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
						     tx_desc->status !=
						     htt_tx_status_ok);
			is_frame_freed = true;
		}
	}
	return is_frame_freed;
}
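
/*
 * Illustrative sketch (not driver code) of what the bool returned by
 * ol_tx_download_done_base() means to callers such as
 * ol_tx_download_done_hl_free() below:
 *
 *	freed = ol_tx_download_done_base(pdev, status, msdu, msdu_id);
 *	if (freed) {
 *		// either the download failed (credit already refunded
 *		// via ol_tx_target_credit_incr), or the target's tx
 *		// completion had already dropped the other reference
 *	} else {
 *		// the target FW still holds a reference; the frame will
 *		// be freed later by ol_tx_completion_handler()
 *	}
 */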

void
ol_tx_download_done_ll(void *pdev,
		       A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
				 msdu_id);
}

void
ol_tx_download_done_hl_retain(void *txrx_pdev,
			      A_STATUS status,
			      qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_txrx_pdev_t *pdev = txrx_pdev;

	ol_tx_download_done_base(pdev, status, msdu, msdu_id);
}

void
ol_tx_download_done_hl_free(void *txrx_pdev,
			    A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_txrx_pdev_t *pdev = txrx_pdev;
	struct ol_tx_desc_t *tx_desc;
	bool is_frame_freed;

	tx_desc = ol_tx_desc_find(pdev, msdu_id);
	qdf_assert(tx_desc);

	DPTRACE(qdf_dp_trace_ptr(msdu,
				 QDF_DP_TRACE_FREE_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(msdu),
				 sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
				 status));

	is_frame_freed = ol_tx_download_done_base(pdev, status, msdu, msdu_id);

	/*
	 * If the frame was freed in ol_tx_download_done_base, return the
	 * descriptor to the tx queue resource pool and stop here.
	 */
	if (is_frame_freed) {
		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
		return;
	}

	if ((tx_desc->pkt_type != OL_TX_FRM_NO_FREE) &&
	    (tx_desc->pkt_type < OL_TXRX_MGMT_TYPE_BASE)) {
		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, status != A_OK);
	}
}

void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
	qdf_atomic_add(credit_delta, &pdev->orig_target_tx_credit);
}

void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      credit_delta,
			      qdf_atomic_read(&pdev->target_tx_credit) +
			      credit_delta);
	qdf_atomic_add(credit_delta, &pdev->target_tx_credit);
}

#ifdef QCA_COMPUTE_TX_DELAY

static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus);

#else
static inline void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus)
{
}
#endif /* QCA_COMPUTE_TX_DELAY */

#if defined(CONFIG_HL_SUPPORT)
int ol_tx_deduct_one_credit(struct ol_txrx_pdev_t *pdev)
{
	/* TODO: Check if enough credits */

	if (!pdev->cfg.default_tx_comp_req) {
		ol_tx_target_credit_update(pdev, -1);
		ol_tx_deduct_one_any_group_credit(pdev);
	}
	return 0;
}
#endif /* CONFIG_HL_SUPPORT */

#ifndef OL_TX_RESTORE_HDR
#define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
#endif
/*
 * The following macros could have been inline functions too.
 * The only rationale for choosing macros is to force the compiler to inline
 * the implementation, which cannot be controlled for actual "inline"
 * functions, since "inline" is only a hint to the compiler.
 * In the performance path, we choose to force the inlining, in preference to
 * the type-checking offered by actual inlined functions.
 */
#define ol_tx_msdu_complete_batch(_pdev, _tx_desc, _tx_descs, _status)	\
	TAILQ_INSERT_TAIL(&(_tx_descs), (_tx_desc), tx_desc_list_elem)
#ifndef ATH_11AC_TXCOMPACT
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,		\
				   _lcl_freelist, _tx_desc_last)	\
	do {								\
		qdf_atomic_init(&(_tx_desc)->ref_cnt);			\
		/* restore original hdr offset */			\
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf));		\
		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
		qdf_nbuf_free((_netbuf));				\
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next =	\
			(_lcl_freelist);				\
		if (qdf_unlikely(!(_lcl_freelist))) {			\
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
				(_tx_desc);				\
		}							\
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)
#else /* !ATH_11AC_TXCOMPACT */
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,		\
				   _lcl_freelist, _tx_desc_last)	\
	do {								\
		/* restore original hdr offset */			\
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf));		\
		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
		qdf_nbuf_free((_netbuf));				\
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next =	\
			(_lcl_freelist);				\
		if (qdf_unlikely(!(_lcl_freelist))) {			\
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
				(_tx_desc);				\
		}							\
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)

#endif /* !ATH_11AC_TXCOMPACT */

#ifdef QCA_TX_SINGLE_COMPLETIONS
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status, is_tx_desc_freed)	\
	{								\
		is_tx_desc_freed = 0;					\
		ol_tx_msdu_complete_single((_pdev), (_tx_desc),		\
					   (_netbuf), (_lcl_freelist),	\
					   _tx_desc_last)		\
	}
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status, is_tx_desc_freed)	\
	do {								\
		if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
			is_tx_desc_freed = 0;				\
			ol_tx_msdu_complete_single((_pdev), (_tx_desc),	\
						   (_netbuf), (_lcl_freelist), \
						   (_tx_desc_last));	\
		} else {						\
			is_tx_desc_freed = 1;				\
			ol_tx_desc_frame_free_nonstd(			\
				(_pdev), (_tx_desc),			\
				(_status) != htt_tx_status_ok);		\
		}							\
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#else /* !QCA_TX_SINGLE_COMPLETIONS */
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status, is_tx_desc_freed)	\
	{								\
		is_tx_desc_freed = 0;					\
		ol_tx_msdu_complete_batch((_pdev), (_tx_desc),		\
					  (_tx_descs), (_status))	\
	}
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status, is_tx_desc_freed)	\
	do {								\
		if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
			is_tx_desc_freed = 0;				\
			ol_tx_msdu_complete_batch((_pdev), (_tx_desc),	\
						  (_tx_descs), (_status)); \
		} else {						\
			is_tx_desc_freed = 1;				\
			ol_tx_desc_frame_free_nonstd((_pdev), (_tx_desc), \
						     (_status) !=	\
						     htt_tx_status_ok);	\
		}							\
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#endif /* QCA_TX_SINGLE_COMPLETIONS */
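
/*
 * Illustrative sketch (not driver code) of how callers consume the
 * is_tx_desc_freed out-parameter of ol_tx_msdu_complete().  Non-standard
 * frames are freed immediately inside the macro, so the descriptor may
 * only be touched afterwards when the flag stays 0:
 *
 *	uint32_t is_tx_desc_freed = 0;
 *
 *	ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
 *			    lcl_freelist, tx_desc_last, status,
 *			    is_tx_desc_freed);
 *	if (!is_tx_desc_freed)
 *		tx_desc->pkt_type = ol_tx_frm_freed;  // still safe
 *	// if is_tx_desc_freed == 1, tx_desc must not be dereferenced
 */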

#if !defined(CONFIG_HL_SUPPORT)
void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
{
	int i = 0;
	struct ol_tx_desc_t *tx_desc;
	int num_discarded = 0;

	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
		tx_desc = ol_tx_desc_find(pdev, i);
		/*
		 * Confirm that each tx descriptor is "empty", i.e. it has
		 * no tx frame attached.
		 * In particular, check that there are no frames that have
		 * been given to the target to transmit, for which the
		 * target has never provided a response.
		 */
		if (qdf_atomic_read(&tx_desc->ref_cnt)) {
			ol_txrx_dbg(
				"Warning: freeing tx desc %d", tx_desc->id);
			ol_tx_desc_frame_free_nonstd(pdev,
						     tx_desc, 1);
			num_discarded++;
		}
	}

	if (num_discarded)
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "Warning: freed %d tx descs for which no tx completion rcvd from the target",
			  num_discarded);
}
#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800546
547void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
548{
549 ol_tx_target_credit_update(pdev, credits);
550
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530551 if (pdev->cfg.is_high_latency)
552 ol_tx_sched(pdev);
553
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800554 /* UNPAUSE OS Q */
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530555 ol_tx_flow_ct_unpause_os_q(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800556}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_flow_pool_lock() - take flow pool lock
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void ol_tx_flow_pool_lock(struct ol_tx_desc_t *tx_desc)
{
	struct ol_tx_flow_pool_t *pool;

	pool = tx_desc->pool;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
}

/**
 * ol_tx_flow_pool_unlock() - release flow pool lock
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void ol_tx_flow_pool_unlock(struct ol_tx_desc_t *tx_desc)
{
	struct ol_tx_flow_pool_t *pool;

	pool = tx_desc->pool;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else
static inline
void ol_tx_flow_pool_lock(struct ol_tx_desc_t *tx_desc)
{
}

static inline
void ol_tx_flow_pool_unlock(struct ol_tx_desc_t *tx_desc)
{
}
#endif
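
/*
 * Illustrative sketch (not driver code) of the copy-then-unlock pattern
 * used with the flow pool lock later in this file.  Completions can race
 * with vdev teardown, so tx_desc->vdev is validated and any needed
 * fields are copied out under the lock before a callback is invoked:
 *
 *	ol_tx_flow_pool_lock(tx_desc);
 *	if (!tx_desc->vdev) {                  // vdev already gone
 *		ol_tx_flow_pool_unlock(tx_desc);
 *		return;
 *	}
 *	osif_dev = tx_desc->vdev->osif_dev;    // copy while valid
 *	ol_tx_flow_pool_unlock(tx_desc);
 *	// use the copied osif_dev; do not dereference tx_desc->vdev here
 */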

#ifdef WLAN_FEATURE_TSF_PLUS
static inline struct htt_tx_compl_ind_append_tx_tstamp *ol_tx_get_txtstamps(
		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
		int num_msdus)
{
	u_int32_t has_tx_tsf;
	u_int32_t has_retry;

	struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL;
	struct htt_tx_compl_ind_append_retries *retry_list = NULL;
	int offset_dwords;

	if (num_msdus <= 0)
		return NULL;

	has_tx_tsf = HTT_TX_COMPL_IND_APPEND1_GET(*msg_word_header);

	/* skip the header and the MSDU ID part */
	offset_dwords = ((num_msdus + 1) >> 1);
	*msg_word_payload += offset_dwords;

	if (!has_tx_tsf)
		return NULL;

	has_retry = HTT_TX_COMPL_IND_APPEND_GET(*msg_word_header);
	if (has_retry) {
		int retry_index = 0;
		int width_for_each_retry =
			(sizeof(struct htt_tx_compl_ind_append_retries) +
			 3) >> 2;

		retry_list = (struct htt_tx_compl_ind_append_retries *)
			(*msg_word_payload + offset_dwords);
		while (retry_list) {
			if (retry_list[retry_index++].flag == 0)
				break;
		}
		offset_dwords = retry_index * width_for_each_retry;
	}

	*msg_word_payload += offset_dwords;
	txtstamp_list = (struct htt_tx_compl_ind_append_tx_tstamp *)
		(*msg_word_payload);
	return txtstamp_list;
}

static inline
struct htt_tx_compl_ind_append_tx_tsf64 *ol_tx_get_txtstamp64s(
		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
		int num_msdus)
{
	u_int32_t has_tx_tstamp64;
	u_int32_t has_rssi;
	struct htt_tx_compl_ind_append_tx_tsf64 *txtstamp64_list = NULL;

	int offset_dwords = 0;

	if (num_msdus <= 0)
		return NULL;

	has_tx_tstamp64 = HTT_TX_COMPL_IND_APPEND3_GET(*msg_word_header);
	if (!has_tx_tstamp64)
		return NULL;

	/* skip the MSDU ACK RSSI part, if present */
	has_rssi = HTT_TX_COMPL_IND_APPEND2_GET(*msg_word_header);
	if (has_rssi)
		offset_dwords = ((num_msdus + 1) >> 1);

	*msg_word_payload = *msg_word_payload + offset_dwords;
	txtstamp64_list =
		(struct htt_tx_compl_ind_append_tx_tsf64 *)
		(*msg_word_payload);

	return txtstamp64_list;
}
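
/*
 * Illustrative layout sketch (not normative; dword counts shown for a
 * hypothetical num_msdus = 3) of the completion indication parsed by
 * the two helpers above:
 *
 *	dword 0: header flags (APPEND/APPEND1/APPEND2/APPEND3 bits)
 *	dwords 1..2: MSDU IDs, two 16-bit IDs packed per dword
 *	             -> (num_msdus + 1) >> 1 = 2 dwords
 *	[optional] retries list, terminated by an entry with flag == 0
 *	[optional] per-MSDU ACK RSSI, again (num_msdus + 1) >> 1 dwords
 *	[optional] 32-bit tx timestamps / 64-bit TSF high:low pairs
 *
 * Each optional section is present only if its header flag is set, so
 * the helpers advance *msg_word_payload past the sections they skip.
 */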

static inline void ol_tx_timestamp(ol_txrx_pdev_handle pdev,
				   qdf_nbuf_t netbuf, u_int64_t ts)
{
	if (!netbuf)
		return;

	if (pdev->ol_tx_timestamp_cb)
		pdev->ol_tx_timestamp_cb(netbuf, ts);
}
#else
static inline struct htt_tx_compl_ind_append_tx_tstamp *ol_tx_get_txtstamps(
		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
		int num_msdus)
{
	return NULL;
}

static inline
struct htt_tx_compl_ind_append_tx_tsf64 *ol_tx_get_txtstamp64s(
		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
		int num_msdus)
{
	return NULL;
}

static inline void ol_tx_timestamp(ol_txrx_pdev_handle pdev,
				   qdf_nbuf_t netbuf, u_int64_t ts)
{
}
#endif

static void ol_tx_update_ack_count(struct ol_tx_desc_t *tx_desc,
				   enum htt_tx_status status)
{
	if (!tx_desc->vdev)
		return;

	if (status == htt_tx_status_ok)
		++tx_desc->vdev->txrx_stats.txack_success;
	else
		++tx_desc->vdev->txrx_stats.txack_failed;
}

/**
 * ol_tx_notify_completion() - Notify tx completion for this desc
 * @tx_desc: tx desc
 * @netbuf: buffer
 *
 * Return: none
 */
static void ol_tx_notify_completion(struct ol_tx_desc_t *tx_desc,
				    qdf_nbuf_t netbuf)
{
	void *osif_dev;
	ol_txrx_completion_fp tx_compl_cbk = NULL;

	qdf_assert(tx_desc);

	ol_tx_flow_pool_lock(tx_desc);

	if (!tx_desc->vdev ||
	    !tx_desc->vdev->osif_dev) {
		ol_tx_flow_pool_unlock(tx_desc);
		return;
	}
	osif_dev = tx_desc->vdev->osif_dev;
	tx_compl_cbk = tx_desc->vdev->tx_comp;
	ol_tx_flow_pool_unlock(tx_desc);

	if (tx_compl_cbk)
		tx_compl_cbk(netbuf, osif_dev);
}

/**
 * ol_tx_update_connectivity_stats() - update connectivity stats
 * @tx_desc: tx desc
 * @netbuf: buffer
 * @status: htt status
 *
 * Return: none
 */
static void ol_tx_update_connectivity_stats(struct ol_tx_desc_t *tx_desc,
					    qdf_nbuf_t netbuf,
					    enum htt_tx_status status)
{
	void *osif_dev;
	uint32_t pkt_type_bitmap;
	ol_txrx_stats_rx_fp stats_rx = NULL;
	uint8_t pkt_type = 0;

	qdf_assert(tx_desc);

	ol_tx_flow_pool_lock(tx_desc);
	/*
	 * When a vdev has gone down but tx completions are still being
	 * received, dereferencing the vdev would be a NULL access.
	 * So, check for NULL before dereferencing it.
	 */
	if (!tx_desc->vdev ||
	    !tx_desc->vdev->osif_dev ||
	    !tx_desc->vdev->stats_rx) {
		ol_tx_flow_pool_unlock(tx_desc);
		return;
	}
	osif_dev = tx_desc->vdev->osif_dev;
	stats_rx = tx_desc->vdev->stats_rx;
	ol_tx_flow_pool_unlock(tx_desc);

	pkt_type_bitmap = cds_get_connectivity_stats_pkt_bitmap(osif_dev);

	if (pkt_type_bitmap) {
		if (status != htt_tx_status_download_fail)
			stats_rx(netbuf, osif_dev,
				 PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
		if (status == htt_tx_status_ok)
			stats_rx(netbuf, osif_dev,
				 PKT_TYPE_TX_ACK_CNT, &pkt_type);
	}
}

/**
 * ol_tx_update_arp_stats() - update ARP packet TX stats
 * @tx_desc: tx desc
 * @netbuf: buffer
 * @status: htt status
 *
 * Return: none
 */
static void ol_tx_update_arp_stats(struct ol_tx_desc_t *tx_desc,
				   qdf_nbuf_t netbuf,
				   enum htt_tx_status status)
{
	uint32_t tgt_ip;

	qdf_assert(tx_desc);

	ol_tx_flow_pool_lock(tx_desc);
	if (!tx_desc->vdev) {
		ol_tx_flow_pool_unlock(tx_desc);
		return;
	}

	tgt_ip = cds_get_arp_stats_gw_ip(tx_desc->vdev->osif_dev);
	ol_tx_flow_pool_unlock(tx_desc);

	if (tgt_ip == qdf_nbuf_get_arp_tgt_ip(netbuf)) {
		if (status != htt_tx_status_download_fail)
			cds_incr_arp_stats_tx_tgt_delivered();
		if (status == htt_tx_status_ok)
			cds_incr_arp_stats_tx_tgt_acked();
	}
}

/**
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored in
 * ol_tx_inspect_handler().
 */
void
ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
			 int num_msdus,
			 enum htt_tx_status status, void *msg)
{
	int i;
	uint16_t tx_desc_id;
	struct ol_tx_desc_t *tx_desc;
	uint32_t byte_cnt = 0;
	qdf_nbuf_t netbuf;
#if !defined(REMOVE_PKT_LOG)
	ol_txrx_pktdump_cb packetdump_cb;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
#endif
	uint32_t is_tx_desc_freed = 0;
	struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL;
	struct htt_tx_compl_ind_append_tx_tsf64 *txtstamp64_list = NULL;
	u_int32_t *msg_word_header = (u_int32_t *)msg;
	/* msg_word_payload: skip the one-dword message header */
	u_int32_t *msg_word_payload = msg_word_header + 1;
	u_int32_t *msg_word = (u_int32_t *)msg;
	u_int16_t *desc_ids = (u_int16_t *)(msg_word + 1);
	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
	ol_tx_desc_list tx_descs;
	uint64_t tx_tsf64;

	TAILQ_INIT(&tx_descs);

	ol_tx_delay_compute(pdev, status, desc_ids, num_msdus);
	if (status == htt_tx_status_ok) {
		txtstamp_list = ol_tx_get_txtstamps(
			msg_word_header, &msg_word_payload, num_msdus);
		if (pdev->enable_tx_compl_tsf64)
			txtstamp64_list = ol_tx_get_txtstamp64s(
				msg_word_header, &msg_word_payload, num_msdus);
	}

	for (i = 0; i < num_msdus; i++) {
		tx_desc_id = desc_ids[i];
		if (tx_desc_id >= pdev->tx_desc.pool_size) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
				  "%s: drop due to invalid msdu id = %x\n",
				  __func__, tx_desc_id);
			continue;
		}
		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
		qdf_assert(tx_desc);
		ol_tx_desc_update_comp_ts(tx_desc);
		tx_desc->status = status;
		netbuf = tx_desc->netbuf;

		if (txtstamp64_list) {
			tx_tsf64 =
			(u_int64_t)txtstamp64_list[i].tx_tsf64_high << 32 |
			txtstamp64_list[i].tx_tsf64_low;

			ol_tx_timestamp(pdev, netbuf, tx_tsf64);
		} else if (txtstamp_list)
			ol_tx_timestamp(pdev, netbuf,
					(u_int64_t)txtstamp_list->timestamp[i]
					);

		QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);

		if (QDF_NBUF_CB_GET_PACKET_TYPE(netbuf) ==
		    QDF_NBUF_CB_PACKET_TYPE_ARP) {
			if (qdf_nbuf_data_is_arp_req(netbuf))
				ol_tx_update_arp_stats(tx_desc, netbuf,
						       status);
		}

		/* check tx completion notification */
		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
			ol_tx_notify_completion(tx_desc, netbuf);

		/* track connectivity stats */
		ol_tx_update_connectivity_stats(tx_desc, netbuf, status);
		ol_tx_update_ack_count(tx_desc, status);

#if !defined(REMOVE_PKT_LOG)
		if (tx_desc->pkt_type != OL_TX_FRM_TSO) {
			packetdump_cb = pdev->ol_tx_packetdump_cb;
			if (packetdump_cb)
				packetdump_cb(soc, pdev->id,
					      tx_desc->vdev_id,
					      netbuf, status, TX_DATA_PKT);
		}
#endif

		DPTRACE(qdf_dp_trace_ptr(netbuf,
					 QDF_DP_TRACE_FREE_PACKET_PTR_RECORD,
					 QDF_TRACE_DEFAULT_PDEV_ID,
					 qdf_nbuf_data_addr(netbuf),
					 sizeof(qdf_nbuf_data(netbuf)),
					 tx_desc->id, status));
		htc_pm_runtime_put(pdev->htt_pdev->htc_pdev);
		/*
		 * If credits are reported through credit_update_ind then do
		 * not update group credits on tx_complete_ind.
		 */
		if (!pdev->cfg.credit_update_enabled)
			ol_tx_desc_update_group_credit(pdev,
						       tx_desc_id,
						       1, 0, status);
		/* Per SDU update of byte count */
		byte_cnt += qdf_nbuf_len(netbuf);
		if (OL_TX_DESC_NO_REFS(tx_desc)) {
			ol_tx_statistics(
				pdev->ctrl_pdev,
				HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
							  (tx_desc->
							   htt_tx_desc))),
				status != htt_tx_status_ok);
			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
					    lcl_freelist, tx_desc_last, status,
					    is_tx_desc_freed);

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
			if (!is_tx_desc_freed) {
				tx_desc->pkt_type = ol_tx_frm_freed;
#ifdef QCA_COMPUTE_TX_DELAY
				tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
			}
#endif
		}
	}

	/* One shot protected access to pdev freelist, when setup */
	if (lcl_freelist) {
		qdf_spin_lock(&pdev->tx_mutex);
		tx_desc_last->next = pdev->tx_desc.freelist;
		pdev->tx_desc.freelist = lcl_freelist;
		pdev->tx_desc.num_free += (uint16_t)num_msdus;
		qdf_spin_unlock(&pdev->tx_mutex);
	} else {
		ol_tx_desc_frame_list_free(pdev, &tx_descs,
					   status != htt_tx_status_ok);
	}

	if (pdev->cfg.is_high_latency) {
		/*
		 * Credit was already explicitly updated by HTT,
		 * but update the number of available tx descriptors,
		 * then invoke the scheduler, since new credit is probably
		 * available now.
		 */
		qdf_atomic_add(num_msdus, &pdev->tx_queue.rsrc_cnt);
		ol_tx_sched(pdev);
	} else {
		ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
	}

	/* UNPAUSE OS Q */
	ol_tx_flow_ct_unpause_os_q(pdev);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
}
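
/*
 * Note on the freelist handling above: descriptors freed by
 * ol_tx_msdu_complete() are chained onto lcl_freelist without taking any
 * lock, and the whole chain is spliced onto pdev->tx_desc.freelist with
 * a single lock/unlock pair.  A sketch of the splice, for a hypothetical
 * chain of three descriptors A -> B -> C with tx_desc_last == C:
 *
 *	qdf_spin_lock(&pdev->tx_mutex);
 *	C->next = pdev->tx_desc.freelist;   // old head follows the chain
 *	pdev->tx_desc.freelist = A;         // chain becomes the new head
 *	qdf_spin_unlock(&pdev->tx_mutex);
 *
 * The mutex hold time is therefore constant, independent of num_msdus.
 */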

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

void ol_tx_desc_update_group_credit(ol_txrx_pdev_handle pdev,
		u_int16_t tx_desc_id, int credit, u_int8_t absolute,
		enum htt_tx_status status)
{
	uint8_t i, is_member;
	uint16_t vdev_id_mask;
	struct ol_tx_desc_t *tx_desc;

	if (tx_desc_id >= pdev->tx_desc.pool_size) {
		qdf_print("Invalid desc id");
		return;
	}

	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
		vdev_id_mask =
			OL_TXQ_GROUP_VDEV_ID_MASK_GET(
					pdev->txq_grps[i].membership);
		is_member = OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_id_mask,
							      tx_desc->vdev_id);
		if (is_member) {
			ol_txrx_update_group_credit(&pdev->txq_grps[i],
						    credit, absolute);
			break;
		}
	}
	ol_tx_update_group_credit_stats(pdev);
}

void ol_tx_deduct_one_any_group_credit(ol_txrx_pdev_handle pdev)
{
	int credits_group_0, credits_group_1;

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	credits_group_0 = qdf_atomic_read(&pdev->txq_grps[0].credit);
	credits_group_1 = qdf_atomic_read(&pdev->txq_grps[1].credit);

	if (credits_group_0 > credits_group_1)
		ol_txrx_update_group_credit(&pdev->txq_grps[0], -1, 0);
	else if (credits_group_1 != 0)
		ol_txrx_update_group_credit(&pdev->txq_grps[1], -1, 0);

	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
}

#ifdef DEBUG_HL_LOGGING

void ol_tx_update_group_credit_stats(ol_txrx_pdev_handle pdev)
{
	uint16_t curr_index;
	uint8_t i;

	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
	pdev->grp_stats.last_valid_index++;
	if (pdev->grp_stats.last_valid_index > (OL_TX_GROUP_STATS_LOG_SIZE
				- 1)) {
		pdev->grp_stats.last_valid_index -= OL_TX_GROUP_STATS_LOG_SIZE;
		pdev->grp_stats.wrap_around = 1;
	}
	curr_index = pdev->grp_stats.last_valid_index;

	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
		pdev->grp_stats.stats[curr_index].grp[i].member_vdevs =
			OL_TXQ_GROUP_VDEV_ID_MASK_GET(
					pdev->txq_grps[i].membership);
		pdev->grp_stats.stats[curr_index].grp[i].credit =
			qdf_atomic_read(&pdev->txq_grps[i].credit);
	}

	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
}
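
/*
 * Illustrative sketch (not driver code) of the circular stats log used
 * above, with OL_TX_GROUP_STATS_LOG_SIZE hypothetically set to 4:
 *
 *	last_valid_index: -1 -> 0 -> 1 -> 2 -> 3 -> 0  (wrap_around = 1)
 *
 * Once wrap_around is set, all OL_TX_GROUP_STATS_LOG_SIZE slots hold
 * valid samples; before that, only indices 0..last_valid_index do.
 * ol_tx_dump_group_credit_stats() below walks the log backwards from
 * last_valid_index and uses wrap_around to decide when to stop.
 */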

void ol_tx_dump_group_credit_stats(ol_txrx_pdev_handle pdev)
{
	uint16_t i, j, is_break = 0;
	int16_t curr_index, old_index, wrap_around;
	uint16_t curr_credit, mem_vdevs;
	uint16_t old_credit = 0;

	txrx_nofl_info("Group credit stats:");
	txrx_nofl_info("  No: GrpID: Credit: Change: vdev_map");

	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
	curr_index = pdev->grp_stats.last_valid_index;
	wrap_around = pdev->grp_stats.wrap_around;
	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);

	if (curr_index < 0) {
		txrx_nofl_info("Not initialized");
		return;
	}

	for (i = 0; i < OL_TX_GROUP_STATS_LOG_SIZE; i++) {
		old_index = curr_index - 1;
		if (old_index < 0) {
			if (wrap_around == 0)
				is_break = 1;
			else
				old_index = OL_TX_GROUP_STATS_LOG_SIZE - 1;
		}

		for (j = 0; j < OL_TX_MAX_TXQ_GROUPS; j++) {
			qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
			curr_credit =
				pdev->grp_stats.stats[curr_index].
								grp[j].credit;
			if (!is_break)
				old_credit =
					pdev->grp_stats.stats[old_index].
								grp[j].credit;

			mem_vdevs =
				pdev->grp_stats.stats[curr_index].grp[j].
								member_vdevs;
			qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);

			if (!is_break)
				txrx_nofl_info("%4d: %5d: %6d %6d %8x",
					       curr_index, j,
					       curr_credit,
					       (curr_credit - old_credit),
					       mem_vdevs);
			else
				txrx_nofl_info("%4d: %5d: %6d %6s %8x",
					       curr_index, j,
					       curr_credit, "NA", mem_vdevs);
		}

		if (is_break)
			break;

		curr_index = old_index;
	}
}

void ol_tx_clear_group_credit_stats(ol_txrx_pdev_handle pdev)
{
	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
	qdf_mem_zero(&pdev->grp_stats, sizeof(pdev->grp_stats));
	pdev->grp_stats.last_valid_index = -1;
	pdev->grp_stats.wrap_around = 0;
	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
}
#endif
#endif

/*
 * ol_tx_single_completion_handler performs the same tx completion
 * processing as ol_tx_completion_handler, but for a single frame.
 * ol_tx_completion_handler is optimized to handle batch completions
 * as efficiently as possible; in contrast ol_tx_single_completion_handler
 * handles single frames as simply and generally as possible.
 * Thus, this ol_tx_single_completion_handler function is suitable for
 * intermittent usage, such as for tx mgmt frames.
 */
void
ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
				enum htt_tx_status status, uint16_t tx_desc_id)
{
	struct ol_tx_desc_t *tx_desc;
	qdf_nbuf_t netbuf;
#if !defined(REMOVE_PKT_LOG)
	ol_txrx_pktdump_cb packetdump_cb;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
#endif

	tx_desc = ol_tx_desc_find_check(pdev, tx_desc_id);
	if (!tx_desc) {
		ol_txrx_err("invalid desc_id(%u), ignore it", tx_desc_id);
		return;
	}

	tx_desc->status = status;
	netbuf = tx_desc->netbuf;

	QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, qdf_nbuf_len(netbuf));

#if !defined(REMOVE_PKT_LOG)
	packetdump_cb = pdev->ol_tx_packetdump_cb;
	if (packetdump_cb)
		packetdump_cb(soc, pdev->id, tx_desc->vdev_id,
			      netbuf, status, TX_MGMT_PKT);
#endif

	if (OL_TX_DESC_NO_REFS(tx_desc)) {
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     status != htt_tx_status_ok);
	}

	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      1, qdf_atomic_read(&pdev->target_tx_credit) + 1);

	if (pdev->cfg.is_high_latency) {
		/*
		 * Credit was already explicitly updated by HTT,
		 * but update the number of available tx descriptors,
		 * then invoke the scheduler, since new credit is probably
		 * available now.
		 */
		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
		ol_tx_sched(pdev);
	} else {
		qdf_atomic_add(1, &pdev->target_tx_credit);
	}
}

/**
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored here.
 */
1214void
1215ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
1216 int num_msdus, void *tx_desc_id_iterator)
1217{
1218 uint16_t vdev_id, i;
1219 struct ol_txrx_vdev_t *vdev;
1220 uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
1221 uint16_t tx_desc_id;
1222 struct ol_tx_desc_t *tx_desc;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001223 union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
1224 union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
Nirav Shahcbc6d722016-03-01 16:24:53 +05301225 qdf_nbuf_t netbuf;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001226 ol_tx_desc_list tx_descs;
gbian1bd297c2016-12-07 11:12:29 +08001227 uint32_t is_tx_desc_freed = 0;
1228
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001229 TAILQ_INIT(&tx_descs);
1230
1231 for (i = 0; i < num_msdus; i++) {
1232 tx_desc_id = desc_ids[i];
Alok Kumar88f50c12018-01-11 14:51:07 +05301233 if (tx_desc_id >= pdev->tx_desc.pool_size) {
1234 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1235 "%s: drop due to invalid msdu id = %x\n",
1236 __func__, tx_desc_id);
1237 continue;
1238 }
Leo Chang376398b2015-10-23 14:19:02 -07001239 tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
Alok Kumar88f50c12018-01-11 14:51:07 +05301240 qdf_assert(tx_desc);
Rakshith Suresh Patkar384a28a2018-11-02 16:43:43 +05301241 ol_tx_desc_update_comp_ts(tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001242 netbuf = tx_desc->netbuf;
1243
1244 /* find the "vdev" this tx_desc belongs to */
1245 vdev_id = HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
1246 (tx_desc->htt_tx_desc)));
1247 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1248 if (vdev->vdev_id == vdev_id)
1249 break;
1250 }
1251
1252 /* vdev now points to the vdev for this descriptor. */
1253
1254#ifndef ATH_11AC_TXCOMPACT
1255 /* save this multicast packet to local free list */
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301256 if (qdf_atomic_dec_and_test(&tx_desc->ref_cnt))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001257#endif
1258 {
Yun Parkf9677152017-04-08 13:29:34 -07001259 /*
1260 * For this function only, force htt status to be
1261 * "htt_tx_status_ok"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001262 * for graceful freeing of this multicast frame
1263 */
1264 ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
1265 lcl_freelist, tx_desc_last,
gbian1bd297c2016-12-07 11:12:29 +08001266 htt_tx_status_ok,
1267 is_tx_desc_freed);
gbiane55c9562016-11-01 14:47:47 +08001268#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
gbian1bd297c2016-12-07 11:12:29 +08001269 if (!is_tx_desc_freed) {
1270 tx_desc->pkt_type = ol_tx_frm_freed;
gbiane55c9562016-11-01 14:47:47 +08001271#ifdef QCA_COMPUTE_TX_DELAY
gbian1bd297c2016-12-07 11:12:29 +08001272 tx_desc->entry_timestamp_ticks = 0xffffffff;
gbiane55c9562016-11-01 14:47:47 +08001273#endif
gbian1bd297c2016-12-07 11:12:29 +08001274 }
gbiane55c9562016-11-01 14:47:47 +08001275#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001276 }
1277 }
1278
1279 if (lcl_freelist) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301280 qdf_spin_lock(&pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001281 tx_desc_last->next = pdev->tx_desc.freelist;
1282 pdev->tx_desc.freelist = lcl_freelist;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301283 qdf_spin_unlock(&pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001284 } else {
1285 ol_tx_desc_frame_list_free(pdev, &tx_descs,
1286 htt_tx_status_discard);
1287 }
1288 TX_CREDIT_DEBUG_PRINT(" <HTT> Increase HTT credit %d + %d = %d..\n",
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301289 qdf_atomic_read(&pdev->target_tx_credit),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001290 num_msdus,
Anurag Chouhan8e0ccd32016-02-19 15:30:20 +05301291 qdf_atomic_read(&pdev->target_tx_credit) +
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001292 num_msdus);
1293
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301294 if (pdev->cfg.is_high_latency) {
1295 /* credit was already explicitly updated by HTT */
1296 ol_tx_sched(pdev);
1297 } else {
1298 ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
1299 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001300}
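
/*
 * Design note (illustrative, not from the original source): the loop in
 * ol_tx_inspect_handler() chains completed descriptors onto lcl_freelist
 * and splices the whole chain into pdev->tx_desc.freelist in one step, so
 * pdev->tx_mutex is taken once per completion message rather than once
 * per MSDU; e.g. a 32-MSDU batch costs one lock round trip instead of 32.
 */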

#ifdef QCA_COMPUTE_TX_DELAY
/**
 * ol_tx_set_compute_interval() - update the compute interval period
 *	for TSM stats
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @interval: interval for stats computation, in milliseconds
 *
 * Return: None
 */
void ol_tx_set_compute_interval(struct cdp_soc_t *soc_hdl,
				uint8_t pdev_id, uint32_t interval)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(interval);
}
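
/*
 * Illustrative usage sketch (hypothetical caller, not from the original
 * source): a control-path caller holding a cdp soc handle could select a
 * 5-second TSM averaging window on pdev 0 with:
 *
 *	ol_tx_set_compute_interval(soc_hdl, 0, 5000);
 *
 * The millisecond interval is converted to OS ticks once here, so the
 * per-completion code below only ever compares tick counters.
 */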

/**
 * ol_tx_packet_count() - return the uplink (transmitted) packet count
 *	and loss count
 * @soc_hdl: soc handle
 * @pdev_id: pdev identifier
 * @out_packet_count: number of packets transmitted
 * @out_packet_loss_count: number of packets lost
 * @category: access category of interest
 *
 * This function is called at a regular interval to get the uplink packet
 * count and loss count for a given stream (access category).  It also
 * resets the counters; hence, the values returned are the packets counted
 * in the last 5-second (default) interval.  These counters are
 * incremented per access category in ol_tx_completion_handler().
 *
 * Return: None
 */
void
ol_tx_packet_count(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		   uint16_t *out_packet_count,
		   uint16_t *out_packet_loss_count, int category)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	*out_packet_count = pdev->packet_count[category];
	*out_packet_loss_count = pdev->packet_loss_count[category];
	pdev->packet_count[category] = 0;
	pdev->packet_loss_count[category] = 0;
}

static uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
{
	uint32_t sum32;
	int shift = 0;
	/*
	 * To avoid doing a 64-bit divide, shift the sum down until it is
	 * no more than 32 bits (and shift the denominator to match).
	 */
	while ((sum >> 32) != 0) {
		sum >>= 1;
		shift++;
	}
	sum32 = (uint32_t)sum;
	num >>= shift;
	return (sum32 + (num >> 1)) / num;	/* round to nearest */
}
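
/*
 * Worked example (illustrative): with sum = 0x123456789 (a 33-bit value)
 * and num = 1000, one loop iteration gives sum = 0x91A2B3C4 and shift = 1,
 * so num becomes 500.  The result is (0x91A2B3C4 + 250) / 500 = 4886718,
 * matching the exact 64-bit quotient 4886718345 / 1000 rounded to the
 * nearest integer, without performing a 64-bit divide.
 */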

void
ol_tx_delay(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	    uint32_t *queue_delay_microsec,
	    uint32_t *tx_delay_microsec, int category)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	int index;
	uint32_t avg_delay_ticks;
	struct ol_tx_delay_data *data;

	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	if (data->avgs.transmit_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.transmit_sum_ticks,
					data->avgs.transmit_num);
		*tx_delay_microsec =
			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*tx_delay_microsec = 0;
	}
	if (data->avgs.queue_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.queue_sum_ticks,
					data->avgs.queue_num);
		*queue_delay_microsec =
			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*queue_delay_microsec = 0;
	}

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}
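
/*
 * Note on the conversion above (illustrative): the stats are kept in OS
 * ticks, and qdf_system_ticks_to_msecs(avg_delay_ticks * 1000) yields
 * microseconds because msecs(ticks * 1000) == usecs(ticks).  E.g. with a
 * hypothetical 100 Hz tick, an average of 3 ticks (30 ms) is reported as
 * qdf_system_ticks_to_msecs(3000) = 30000 microseconds.  Readers take
 * index = 1 - in_progress_idx so they only ever see the completed copy
 * of the ping-pong stats, never the half-built one.
 */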

void
ol_tx_delay_hist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		 uint16_t *report_bin_values, int category)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	int index, i, j;
	struct ol_tx_delay_data *data;

	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	for (i = 0, j = 0; i < QCA_TX_DELAY_HIST_REPORT_BINS - 1; i++) {
		uint16_t internal_bin_sum = 0;

		while (j < (1 << i))
			internal_bin_sum += data->hist_bins_queue[j++];

		report_bin_values[i] = internal_bin_sum;
	}
	report_bin_values[i] = data->hist_bins_queue[j];	/* overflow */

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}
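
/*
 * Illustrative bin mapping (assuming QCA_TX_DELAY_HIST_REPORT_BINS == 5):
 * the report loop above folds the fixed-width internal bins into report
 * bins whose widths grow as powers of two:
 *
 *	report[0] = internal[0]
 *	report[1] = internal[1]
 *	report[2] = internal[2] + internal[3]
 *	report[3] = internal[4] + ... + internal[7]
 *	report[4] = internal[8]		(overflow bin)
 */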

#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
static uint8_t
ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
			    qdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
{
	uint16_t ethertype;
	uint8_t *dest_addr, *l3_hdr;
	int is_mgmt, is_mcast;
	int l2_hdr_size;

	dest_addr = ol_tx_dest_addr_find(pdev, msdu);
	if (!dest_addr)
		return QDF_NBUF_TX_EXT_TID_INVALID;

	is_mcast = IEEE80211_IS_MULTICAST(dest_addr);
	is_mgmt = tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE;
	if (is_mgmt) {
		return (is_mcast) ?
		       OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT :
		       HTT_TX_EXT_TID_MGMT;
	}
	if (is_mcast)
		return OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST;

	if (pdev->frame_format == wlan_frm_fmt_802_3) {
		struct ethernet_hdr_t *enet_hdr;

		enet_hdr = (struct ethernet_hdr_t *)qdf_nbuf_data(msdu);
		l2_hdr_size = sizeof(struct ethernet_hdr_t);
		ethertype =
			(enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1];
		if (!IS_ETHERTYPE(ethertype)) {
			struct llc_snap_hdr_t *llc_hdr;

			llc_hdr = (struct llc_snap_hdr_t *)
				  (qdf_nbuf_data(msdu) + l2_hdr_size);
			l2_hdr_size += sizeof(struct llc_snap_hdr_t);
			ethertype =
				(llc_hdr->ethertype[0] << 8) |
				llc_hdr->ethertype[1];
		}
	} else {
		struct llc_snap_hdr_t *llc_hdr;

		l2_hdr_size = sizeof(struct ieee80211_frame);
		llc_hdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(msdu)
						    + l2_hdr_size);
		l2_hdr_size += sizeof(struct llc_snap_hdr_t);
		ethertype =
			(llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
	}
	l3_hdr = qdf_nbuf_data(msdu) + l2_hdr_size;
	if (ETHERTYPE_IPV4 == ethertype) {
		return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7;
	} else if (ETHERTYPE_IPV6 == ethertype) {
		return (ipv6_traffic_class((struct ipv6_hdr_t *)l3_hdr) >> 5) &
		       0x7;
	} else {
		return QDF_NBUF_TX_EXT_TID_INVALID;
	}
}
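
/*
 * Worked example (illustrative): for an IPv4 frame with tos = 0xB8
 * (DSCP EF), ol_tx_delay_tid_from_l3_hdr() returns (0xB8 >> 5) & 0x7 = 5;
 * i.e. the three IP precedence bits of the TOS byte (or of the IPv6
 * traffic class) are used directly as the TID-like delay category.
 */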

static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
{
	struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
	uint8_t tid;
	qdf_nbuf_t msdu = tx_desc->netbuf;

	tid = qdf_nbuf_get_tid(msdu);
	if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
		tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc);
		if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
			/*
			 * TID could not be determined
			 * (this is not an IP frame?)
			 */
			return -EINVAL;
		}
	}
	return tid;
}
#else
static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
{
	return 0;
}
#endif

static inline int
ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks)
{
	int bin;
	/*
	 * For speed, multiply and shift to approximate a divide.  This
	 * causes a small error, but the approximation error should be
	 * much less than the other uncertainties in the tx delay
	 * computation.
	 */
	bin = (delay_ticks * pdev->tx_delay.hist_internal_bin_width_mult) >>
	      pdev->tx_delay.hist_internal_bin_width_shift;
	if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
		bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;

	return bin;
}
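
/*
 * Illustrative numbers (hypothetical): to approximate division by a bin
 * width of 10 ticks, setup code could pick mult = 103 and shift = 10,
 * since 103 / 1024 ~= 1 / 10.  A delay of 95 ticks then maps to
 * (95 * 103) >> 10 = 9785 >> 10 = bin 9, matching 95 / 10 without a
 * divide instruction.
 */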

static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus)
{
	int i, index, cat;
	uint32_t now_ticks = qdf_system_ticks();
	uint32_t tx_delay_transmit_ticks, tx_delay_queue_ticks;
	uint32_t avg_time_ticks;
	struct ol_tx_delay_data *data;

	qdf_assert(num_msdus > 0);

	/*
	 * Keep static counters for total and lost packets; they are reset
	 * in ol_tx_packet_count(), the function used to fetch the stats.
	 */

	cat = ol_tx_delay_category(pdev, desc_ids[0]);
	if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
		return;

	pdev->packet_count[cat] = pdev->packet_count[cat] + num_msdus;
	if (status != htt_tx_status_ok) {
		for (i = 0; i < num_msdus; i++) {
			cat = ol_tx_delay_category(pdev, desc_ids[i]);
			if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
				return;
			pdev->packet_loss_count[cat]++;
		}
		return;
	}

	/* since we may switch the ping-pong index, provide mutex w. readers */
	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = pdev->tx_delay.cats[cat].in_progress_idx;

	data = &pdev->tx_delay.cats[cat].copies[index];

	if (pdev->tx_delay.tx_compl_timestamp_ticks != 0) {
		tx_delay_transmit_ticks =
			now_ticks - pdev->tx_delay.tx_compl_timestamp_ticks;
		/*
		 * We'd like to account for the number of MSDUs that were
		 * transmitted together, but we don't know this.  All we
		 * know is the number of MSDUs that were acked together.
		 * Since the frame error rate is small, this is nearly the
		 * same as the number of frames transmitted together.
		 */
		data->avgs.transmit_sum_ticks += tx_delay_transmit_ticks;
		data->avgs.transmit_num += num_msdus;
	}
	pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks;

	for (i = 0; i < num_msdus; i++) {
		int bin;
		uint16_t id = desc_ids[i];
		struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id);

		tx_delay_queue_ticks =
			now_ticks - tx_desc->entry_timestamp_ticks;

		data->avgs.queue_sum_ticks += tx_delay_queue_ticks;
		data->avgs.queue_num++;
		bin = ol_tx_delay_hist_bin(pdev, tx_delay_queue_ticks);
		data->hist_bins_queue[bin]++;
	}

	/* check if it's time to start a new average */
	avg_time_ticks =
		now_ticks - pdev->tx_delay.cats[cat].avg_start_time_ticks;
	if (avg_time_ticks > pdev->tx_delay.avg_period_ticks) {
		pdev->tx_delay.cats[cat].avg_start_time_ticks = now_ticks;
		index = 1 - index;
		pdev->tx_delay.cats[cat].in_progress_idx = index;
		qdf_mem_zero(&pdev->tx_delay.cats[cat].copies[index],
			     sizeof(pdev->tx_delay.cats[cat].copies[index]));
	}

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}
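
/*
 * Illustrative timeline (hypothetical tick values): suppose an MSDU was
 * queued at tick 100, the previous completion event arrived at tick 110,
 * and this MSDU's completion arrives at tick 118.  ol_tx_delay_compute()
 * records a queue delay of 118 - 100 = 18 ticks for that MSDU, and a
 * transmit-delay sample of 118 - 110 = 8 ticks (the inter-completion
 * gap), attributed to every MSDU acked in the batch.
 */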

#endif /* QCA_COMPUTE_TX_DELAY */

#ifdef WLAN_FEATURE_TSF_PLUS
void ol_register_timestamp_callback(tp_ol_timestamp_cb ol_tx_timestamp_cb)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc)) {
		ol_txrx_err("soc is NULL");
		return;
	}

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}
	pdev->ol_tx_timestamp_cb = ol_tx_timestamp_cb;
}

void ol_deregister_timestamp_callback(void)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc)) {
		ol_txrx_err("soc is NULL");
		return;
	}

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}
	pdev->ol_tx_timestamp_cb = NULL;
}
#endif