/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
#include <qdf_lock.h>           /* qdf_os_spinlock */
#include <qdf_time.h>           /* qdf_system_ticks, etc. */
#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */

#include <cds_queue.h>          /* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ieee80211.h>          /* ieee80211_frame, etc. */
#include <enet.h>               /* ethernet_hdr_t, etc. */
#include <ipv6_defs.h>          /* ipv6_traffic_class */
#endif

#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h>    /* htt_tx_status */

#include <ol_ctrl_txrx_api.h>
#include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#endif
#include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h>              /* ol_tx_reinject */

#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
#endif

#ifdef TX_CREDIT_RECLAIM_SUPPORT

#define OL_TX_CREDIT_RECLAIM(pdev) \
	do { \
		if (qdf_atomic_read(&pdev->target_tx_credit) < \
		    ol_cfg_tx_credit_lwm(pdev->ctrl_pdev)) { \
			ol_osif_ath_tasklet(pdev->osdev); \
		} \
	} while (0)

#else

#define OL_TX_CREDIT_RECLAIM(pdev)

#endif /* TX_CREDIT_RECLAIM_SUPPORT */

#if defined(TX_CREDIT_RECLAIM_SUPPORT)
/*
 * HL needs to keep track of the amount of credit available to download
 * tx frames to the target - the download scheduler decides when to
 * download frames, and which frames to download, based on the credit
 * availability.
 * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
 * of the target_tx_credit, to determine when to poll for tx completion
 * messages.
 */
#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu) \
	qdf_atomic_add( \
		factor * htt_tx_msdu_credit(msdu), &pdev->target_tx_credit)
#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu) \
	OL_TX_TARGET_CREDIT_ADJUST(-1, pdev, msdu)
#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu) \
	OL_TX_TARGET_CREDIT_ADJUST(1, pdev, msdu)
#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta) \
	qdf_atomic_add(-1 * delta, &pdev->target_tx_credit)
#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta) \
	qdf_atomic_add(delta, &pdev->target_tx_credit)
#else
/*
 * LL does not need to keep track of target credit.
 * Since the host tx descriptor pool size matches the target's,
 * we know the target has space for the new tx frame if the host's
 * tx descriptor allocation succeeded.
 */
#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu)  /* no-op */
#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu)            /* no-op */
#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu)            /* no-op */
#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta)       /* no-op */
#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta)       /* no-op */
#endif

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev) \
	do { \
		struct ol_txrx_vdev_t *vdev; \
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { \
			if (qdf_atomic_read(&vdev->os_q_paused) && \
			    (vdev->tx_fl_hwm != 0)) { \
				qdf_spin_lock(&pdev->tx_mutex); \
				if (pdev->tx_desc.num_free > \
				    vdev->tx_fl_hwm) { \
					qdf_atomic_set(&vdev->os_q_paused, 0); \
					qdf_spin_unlock(&pdev->tx_mutex); \
					ol_txrx_flow_control_cb(vdev, true); \
				} else { \
					qdf_spin_unlock(&pdev->tx_mutex); \
				} \
			} \
		} \
	} while (0)
#else
#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev)
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline uint16_t
ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
		struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
{
	int msdu_credit_consumed;

	TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", qdf_nbuf_len(msdu));
	TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      qdf_atomic_read(&pdev->target_tx_credit) - 1,
			      qdf_nbuf_len(msdu));

	msdu_credit_consumed = htt_tx_msdu_credit(msdu);
	OL_TX_TARGET_CREDIT_DECR_INT(pdev, msdu_credit_consumed);
	OL_TX_CREDIT_RECLAIM(pdev);

	/*
	 * When the tx frame is downloaded to the target, there are two
	 * outstanding references:
	 * 1. The host download SW (HTT, HTC, HIF)
	 *    This reference is cleared by the ol_tx_send_done callback
	 *    functions.
	 * 2. The target FW
	 *    This reference is cleared by the ol_tx_completion_handler
	 *    function.
	 * It is extremely probable that the download completion is processed
	 * before the tx completion message. However, under exceptional
	 * conditions the tx completion may be processed first. Thus, rather
	 * than assuming that reference (1) is done before reference (2),
	 * explicit reference tracking is needed.
	 * Double-increment the ref count to account for both references
	 * described above.
	 */

	OL_TX_DESC_REF_INIT(tx_desc);
	OL_TX_DESC_REF_INC(tx_desc);
	OL_TX_DESC_REF_INC(tx_desc);

	return msdu_credit_consumed;
}

void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
	   struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
{
	int msdu_credit_consumed;
	uint16_t id;
	int failed;

	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
	id = ol_tx_desc_id(pdev, tx_desc);
	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
	DPTRACE(qdf_dp_trace(msdu, QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
			     (uint8_t *)(qdf_nbuf_data(msdu)),
			     sizeof(qdf_nbuf_data(msdu))));
	failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
	if (qdf_unlikely(failed)) {
		OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
	}
}

void
ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
		 qdf_nbuf_t head_msdu, int num_msdus)
{
	qdf_nbuf_t rejected;
	OL_TX_CREDIT_RECLAIM(pdev);

	rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
	while (qdf_unlikely(rejected)) {
		struct ol_tx_desc_t *tx_desc;
		uint16_t *msdu_id_storage;
		qdf_nbuf_t next;

		next = qdf_nbuf_next(rejected);
		msdu_id_storage = ol_tx_msdu_id_storage(rejected);
		tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);

		OL_TX_TARGET_CREDIT_INCR(pdev, rejected);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);

		rejected = next;
	}
}

void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
		  struct ol_tx_desc_t *tx_desc,
		  qdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
{
	int msdu_credit_consumed;
	uint16_t id;
	int failed;

	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
	id = ol_tx_desc_id(pdev, tx_desc);
	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
	failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
	if (failed) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Error: freeing tx frame after htt_tx failed");
		OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
	}
}

static inline void
ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
			 A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_find(pdev, msdu_id);
	qdf_assert(tx_desc);

	/*
	 * If the download is done for a management frame,
	 * call the download callback if one is registered.
	 */
	if (tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
		int tx_mgmt_index = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
		ol_txrx_mgmt_tx_cb download_cb =
			pdev->tx_mgmt.callbacks[tx_mgmt_index].download_cb;

		if (download_cb) {
			download_cb(pdev->tx_mgmt.callbacks[tx_mgmt_index].ctxt,
				    tx_desc->netbuf, status != A_OK);
		}
	}

	if (status != A_OK) {
		OL_TX_TARGET_CREDIT_INCR(pdev, msdu);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     1 /* download err */);
	} else {
		if (OL_TX_DESC_NO_REFS(tx_desc)) {
			/*
			 * The decremented value was zero - free the frame.
			 * Use the tx status recorded previously during
			 * tx completion handling.
			 */
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
						     tx_desc->status !=
						     htt_tx_status_ok);
		}
	}
}

void
ol_tx_download_done_ll(void *pdev,
		       A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
				 msdu_id);
}

void
ol_tx_download_done_hl_retain(void *txrx_pdev,
			      A_STATUS status,
			      qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_txrx_pdev_t *pdev = txrx_pdev;
	ol_tx_download_done_base(pdev, status, msdu, msdu_id);
}

void
ol_tx_download_done_hl_free(void *txrx_pdev,
			    A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_txrx_pdev_t *pdev = txrx_pdev;
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_find(pdev, msdu_id);
	qdf_assert(tx_desc);

	ol_tx_download_done_base(pdev, status, msdu, msdu_id);

	if ((tx_desc->pkt_type != ol_tx_frm_no_free) &&
	    (tx_desc->pkt_type < OL_TXRX_MGMT_TYPE_BASE)) {
		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, status != A_OK);
	}
}

void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
	qdf_atomic_add(credit_delta, &pdev->orig_target_tx_credit);
}

void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      credit_delta,
			      qdf_atomic_read(&pdev->target_tx_credit) +
			      credit_delta);
	qdf_atomic_add(credit_delta, &pdev->target_tx_credit);
}

#ifdef QCA_COMPUTE_TX_DELAY

static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus);
#define OL_TX_DELAY_COMPUTE ol_tx_delay_compute
#else
#define OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus)	/* no-op */
#endif /* QCA_COMPUTE_TX_DELAY */

#ifndef OL_TX_RESTORE_HDR
#define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
#endif
/*
 * The following macros could have been inline functions too.
 * The only rationale for choosing macros is to force the compiler to inline
 * the implementation, which cannot be controlled for actual "inline"
 * functions, since "inline" is only a hint to the compiler.
 * In the performance path, we choose to force the inlining, in preference to
 * the type checking offered by actual inline functions.
 */
#define ol_tx_msdu_complete_batch(_pdev, _tx_desc, _tx_descs, _status) \
	TAILQ_INSERT_TAIL(&(_tx_descs), (_tx_desc), tx_desc_list_elem)
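/*
 * For illustration only: a type-checked (but merely inline-hinted)
 * equivalent of the batch-completion macro above might look like the
 * hypothetical function sketched below; the macro form is kept in the
 * driver purely to guarantee inlining in the performance path.
 *
 *	static inline void
 *	ol_tx_msdu_complete_batch_func(struct ol_txrx_pdev_t *pdev,
 *				       struct ol_tx_desc_t *tx_desc,
 *				       ol_tx_desc_list *tx_descs,
 *				       enum htt_tx_status status)
 *	{
 *		TAILQ_INSERT_TAIL(tx_descs, tx_desc, tx_desc_list_elem);
 *	}
 */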
#ifndef ATH_11AC_TXCOMPACT
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf, \
				   _lcl_freelist, _tx_desc_last) \
	do { \
		qdf_atomic_init(&(_tx_desc)->ref_cnt); \
		/* restore original hdr offset */ \
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
		qdf_nbuf_free((_netbuf)); \
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
			(_lcl_freelist); \
		if (qdf_unlikely(!(_lcl_freelist))) { \
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *) \
				(_tx_desc); \
		} \
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)
#else /* !ATH_11AC_TXCOMPACT */
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf, \
				   _lcl_freelist, _tx_desc_last) \
	do { \
		/* restore original hdr offset */ \
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
		qdf_nbuf_free((_netbuf)); \
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
			(_lcl_freelist); \
		if (qdf_unlikely(!(_lcl_freelist))) { \
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *) \
				(_tx_desc); \
		} \
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)

#endif /* !ATH_11AC_TXCOMPACT */

#ifdef QCA_TX_SINGLE_COMPLETIONS
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
			    _netbuf, _lcl_freelist, \
			    _tx_desc_last, _status) \
	ol_tx_msdu_complete_single((_pdev), (_tx_desc), \
				   (_netbuf), (_lcl_freelist), \
				   _tx_desc_last)
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
			    _netbuf, _lcl_freelist, \
			    _tx_desc_last, _status) \
	do { \
		if (qdf_likely((_tx_desc)->pkt_type == ol_tx_frm_std)) { \
			ol_tx_msdu_complete_single((_pdev), (_tx_desc), \
						   (_netbuf), (_lcl_freelist), \
						   (_tx_desc_last)); \
		} else { \
			ol_tx_desc_frame_free_nonstd( \
				(_pdev), (_tx_desc), \
				(_status) != htt_tx_status_ok); \
		} \
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#else /* !QCA_TX_SINGLE_COMPLETIONS */
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
			    _netbuf, _lcl_freelist, \
			    _tx_desc_last, _status) \
	ol_tx_msdu_complete_batch((_pdev), (_tx_desc), (_tx_descs), (_status))
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
			    _netbuf, _lcl_freelist, \
			    _tx_desc_last, _status) \
	do { \
		if (qdf_likely((_tx_desc)->pkt_type == ol_tx_frm_std)) { \
			ol_tx_msdu_complete_batch((_pdev), (_tx_desc), \
						  (_tx_descs), (_status)); \
		} else { \
			ol_tx_desc_frame_free_nonstd((_pdev), (_tx_desc), \
						     (_status) != \
						     htt_tx_status_ok); \
		} \
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#endif /* QCA_TX_SINGLE_COMPLETIONS */

void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
{
	int i = 0;
	struct ol_tx_desc_t *tx_desc;

	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
		tx_desc = ol_tx_desc_find(pdev, i);
		/*
		 * Confirm that each tx descriptor is "empty", i.e. it has
		 * no tx frame attached.
		 * In particular, check that there are no frames that have
		 * been given to the target to transmit, for which the
		 * target has never provided a response.
		 */
		if (qdf_atomic_read(&tx_desc->ref_cnt)) {
			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
				   "Warning: freeing tx frame "
				   "(no tx completion from the target)\n");
			ol_tx_desc_frame_free_nonstd(pdev,
						     tx_desc, 1);
		}
	}
}

void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
{
	ol_tx_target_credit_update(pdev, credits);

	/* UNPAUSE OS Q */
	OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
}

/*
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored in
 * ol_tx_inspect_handler().
 */
void
ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
			 int num_msdus,
			 enum htt_tx_status status, void *tx_desc_id_iterator)
{
	int i;
	uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
	uint16_t tx_desc_id;
	struct ol_tx_desc_t *tx_desc;
	char *trace_str;

	uint32_t byte_cnt = 0;
	qdf_nbuf_t netbuf;

	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
	ol_tx_desc_list tx_descs;
	TAILQ_INIT(&tx_descs);

	OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus);

	trace_str = (status) ? "OT:C:F:" : "OT:C:S:";
	for (i = 0; i < num_msdus; i++) {
		tx_desc_id = desc_ids[i];
		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
		tx_desc->status = status;
		netbuf = tx_desc->netbuf;

		qdf_runtime_pm_put();
		qdf_nbuf_trace_update(netbuf, trace_str);
		/* Per SDU update of byte count */
		byte_cnt += qdf_nbuf_len(netbuf);
		if (OL_TX_DESC_NO_REFS(tx_desc)) {
			ol_tx_statistics(
				pdev->ctrl_pdev,
				HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
							  (tx_desc->
							   htt_tx_desc))),
				status != htt_tx_status_ok);
			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
					    lcl_freelist, tx_desc_last, status);
		}
		QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
		tx_desc->pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
		tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
#endif
	}

	/* One shot protected access to pdev freelist, when setup */
	if (lcl_freelist) {
		qdf_spin_lock(&pdev->tx_mutex);
		tx_desc_last->next = pdev->tx_desc.freelist;
		pdev->tx_desc.freelist = lcl_freelist;
		pdev->tx_desc.num_free += (uint16_t) num_msdus;
		qdf_spin_unlock(&pdev->tx_mutex);
	} else {
		ol_tx_desc_frame_list_free(pdev, &tx_descs,
					   status != htt_tx_status_ok);
	}

	OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);

	/* UNPAUSE OS Q */
	OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
}

/*
 * ol_tx_single_completion_handler performs the same tx completion
 * processing as ol_tx_completion_handler, but for a single frame.
 * ol_tx_completion_handler is optimized to handle batch completions
 * as efficiently as possible; in contrast ol_tx_single_completion_handler
 * handles single frames as simply and generally as possible.
 * Thus, this ol_tx_single_completion_handler function is suitable for
 * intermittent usage, such as for tx mgmt frames.
 */
void
ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
				enum htt_tx_status status, uint16_t tx_desc_id)
{
	struct ol_tx_desc_t *tx_desc;
	qdf_nbuf_t netbuf;

	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
	tx_desc->status = status;
	netbuf = tx_desc->netbuf;

	QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, qdf_nbuf_len(netbuf));

	if (OL_TX_DESC_NO_REFS(tx_desc)) {
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     status != htt_tx_status_ok);
	}

	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      1, qdf_atomic_read(&pdev->target_tx_credit) + 1);


	qdf_atomic_add(1, &pdev->target_tx_credit);
}

/*
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored here.
 */
void
ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
		      int num_msdus, void *tx_desc_id_iterator)
{
	uint16_t vdev_id, i;
	struct ol_txrx_vdev_t *vdev;
	uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
	uint16_t tx_desc_id;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
	qdf_nbuf_t netbuf;
	ol_tx_desc_list tx_descs;
	TAILQ_INIT(&tx_descs);

	for (i = 0; i < num_msdus; i++) {
		tx_desc_id = desc_ids[i];
		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
		netbuf = tx_desc->netbuf;

		/* find the "vdev" this tx_desc belongs to */
		vdev_id = HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
						    (tx_desc->htt_tx_desc)));
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->vdev_id == vdev_id)
				break;
		}

		/* vdev now points to the vdev for this descriptor. */

#ifndef ATH_11AC_TXCOMPACT
		/* save this multicast packet to local free list */
		if (qdf_atomic_dec_and_test(&tx_desc->ref_cnt))
#endif
		{
			/*
			 * For this function only, force the htt status to be
			 * "htt_tx_status_ok", for graceful freeing of this
			 * multicast frame.
			 */
			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
					    lcl_freelist, tx_desc_last,
					    htt_tx_status_ok);
		}
	}

	if (lcl_freelist) {
		qdf_spin_lock(&pdev->tx_mutex);
		tx_desc_last->next = pdev->tx_desc.freelist;
		pdev->tx_desc.freelist = lcl_freelist;
		qdf_spin_unlock(&pdev->tx_mutex);
	} else {
		ol_tx_desc_frame_list_free(pdev, &tx_descs,
					   htt_tx_status_discard);
	}
	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase HTT credit %d + %d = %d..\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      num_msdus,
			      qdf_atomic_read(&pdev->target_tx_credit) +
			      num_msdus);

	OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);
}

#ifdef QCA_COMPUTE_TX_DELAY

void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval)
{
	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(interval);
}

void
ol_tx_packet_count(ol_txrx_pdev_handle pdev,
		   uint16_t *out_packet_count,
		   uint16_t *out_packet_loss_count, int category)
{
	*out_packet_count = pdev->packet_count[category];
	*out_packet_loss_count = pdev->packet_loss_count[category];
	pdev->packet_count[category] = 0;
	pdev->packet_loss_count[category] = 0;
}

uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
{
	uint32_t sum32;
	int shift = 0;
	/*
	 * To avoid doing a 64-bit divide, shift the sum down until it is
	 * no more than 32 bits (and shift the denominator to match).
	 */
	while ((sum >> 32) != 0) {
		sum >>= 1;
		shift++;
	}
	sum32 = (uint32_t) sum;
	num >>= shift;
	return (sum32 + (num >> 1)) / num;	/* round to nearest */
}
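
/*
 * Worked example (hypothetical numbers) for the shift-based average above:
 * sum = 0x180000000 (too wide for 32 bits), num = 600.
 * One right shift brings the sum within 32 bits: sum32 = 0xC0000000,
 * num = 300, and (0xC0000000 + 150) / 300 = 10737418, which matches the
 * exact 64-bit average 0x180000000 / 600 (~10737418.2) after rounding.
 */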

void
ol_tx_delay(ol_txrx_pdev_handle pdev,
	    uint32_t *queue_delay_microsec,
	    uint32_t *tx_delay_microsec, int category)
{
	int index;
	uint32_t avg_delay_ticks;
	struct ol_tx_delay_data *data;

	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	if (data->avgs.transmit_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.transmit_sum_ticks,
					data->avgs.transmit_num);
		*tx_delay_microsec =
			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*tx_delay_microsec = 0;
	}
	if (data->avgs.queue_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.queue_sum_ticks,
					data->avgs.queue_num);
		*queue_delay_microsec =
			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*queue_delay_microsec = 0;
	}

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

void
ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
		 uint16_t *report_bin_values, int category)
{
	int index, i, j;
	struct ol_tx_delay_data *data;

	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	for (i = 0, j = 0; i < QCA_TX_DELAY_HIST_REPORT_BINS - 1; i++) {
		uint16_t internal_bin_sum = 0;
		while (j < (1 << i))
			internal_bin_sum += data->hist_bins_queue[j++];

		report_bin_values[i] = internal_bin_sum;
	}
	report_bin_values[i] = data->hist_bins_queue[j];	/* overflow */

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
static inline uint8_t *ol_tx_dest_addr_find(struct ol_txrx_pdev_t *pdev,
					    qdf_nbuf_t tx_nbuf)
{
	uint8_t *hdr_ptr;
	void *datap = qdf_nbuf_data(tx_nbuf);

	if (pdev->frame_format == wlan_frm_fmt_raw) {
		/* adjust hdr_ptr to RA */
		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
		hdr_ptr = wh->i_addr1;
	} else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
		/* adjust hdr_ptr to RA */
		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
		hdr_ptr = wh->i_addr1;
	} else if (pdev->frame_format == wlan_frm_fmt_802_3) {
		hdr_ptr = datap;
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid standard frame type: %d",
			  pdev->frame_format);
		qdf_assert(0);
		hdr_ptr = NULL;
	}
	return hdr_ptr;
}

static uint8_t
ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
			    qdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
{
	uint16_t ethertype;
	uint8_t *dest_addr, *l3_hdr;
	int is_mgmt, is_mcast;
	int l2_hdr_size;

	dest_addr = ol_tx_dest_addr_find(pdev, msdu);
	if (NULL == dest_addr)
		return QDF_NBUF_TX_EXT_TID_INVALID;

	is_mcast = IEEE80211_IS_MULTICAST(dest_addr);
	is_mgmt = tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE;
	if (is_mgmt) {
		return (is_mcast) ?
		       OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT :
		       HTT_TX_EXT_TID_MGMT;
	}
	if (is_mcast)
		return OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST;

	if (pdev->frame_format == wlan_frm_fmt_802_3) {
		struct ethernet_hdr_t *enet_hdr;
		enet_hdr = (struct ethernet_hdr_t *)qdf_nbuf_data(msdu);
		l2_hdr_size = sizeof(struct ethernet_hdr_t);
		ethertype =
			(enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1];
		if (!IS_ETHERTYPE(ethertype)) {
			struct llc_snap_hdr_t *llc_hdr;
			llc_hdr = (struct llc_snap_hdr_t *)
				  (qdf_nbuf_data(msdu) + l2_hdr_size);
			l2_hdr_size += sizeof(struct llc_snap_hdr_t);
			ethertype =
				(llc_hdr->ethertype[0] << 8) | llc_hdr->
				ethertype[1];
		}
	} else {
		struct llc_snap_hdr_t *llc_hdr;
		l2_hdr_size = sizeof(struct ieee80211_frame);
		llc_hdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(msdu)
						    + l2_hdr_size);
		l2_hdr_size += sizeof(struct llc_snap_hdr_t);
		ethertype =
			(llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
	}
	l3_hdr = qdf_nbuf_data(msdu) + l2_hdr_size;
	if (ETHERTYPE_IPV4 == ethertype) {
		return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7;
	} else if (ETHERTYPE_IPV6 == ethertype) {
		return (ipv6_traffic_class((struct ipv6_hdr_t *)l3_hdr) >> 5) &
		       0x7;
	} else {
		return QDF_NBUF_TX_EXT_TID_INVALID;
	}
}
#endif

static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
{
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
	struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
	uint8_t tid;

	qdf_nbuf_t msdu = tx_desc->netbuf;
	tid = qdf_nbuf_get_tid(msdu);
	if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
		tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc);
		if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
			/*
			 * TID could not be determined
			 * (this is not an IP frame?)
			 */
			return -EINVAL;
		}
	}
	return tid;
#else
	return 0;
#endif
}

static inline int
ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks)
{
	int bin;
	/*
	 * For speed, multiply and shift to approximate a divide. This causes
	 * a small error, but the approximation error should be much less
	 * than the other uncertainties in the tx delay computation.
	 */
	bin = (delay_ticks * pdev->tx_delay.hist_internal_bin_width_mult) >>
	      pdev->tx_delay.hist_internal_bin_width_shift;
	if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
		bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;

	return bin;
}
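
/*
 * Illustration with hypothetical values: if the internal bin width were
 * 10 ticks, the pdev could hold hist_internal_bin_width_mult = 410 and
 * hist_internal_bin_width_shift = 12, since 410 / 4096 is roughly 1/10.
 * A queue delay of 35 ticks then maps to bin (35 * 410) >> 12 = 3, the
 * same bin as the exact division 35 / 10.
 */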

static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus)
{
	int i, index, cat;
	uint32_t now_ticks = qdf_system_ticks();
	uint32_t tx_delay_transmit_ticks, tx_delay_queue_ticks;
	uint32_t avg_time_ticks;
	struct ol_tx_delay_data *data;

	qdf_assert(num_msdus > 0);

	/*
	 * Keep counters of total packets and lost packets;
	 * they are reset in ol_tx_delay(), the function used to fetch
	 * the stats.
	 */

	cat = ol_tx_delay_category(pdev, desc_ids[0]);
	if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
		return;

	pdev->packet_count[cat] = pdev->packet_count[cat] + num_msdus;
	if (status != htt_tx_status_ok) {
		for (i = 0; i < num_msdus; i++) {
			cat = ol_tx_delay_category(pdev, desc_ids[i]);
			if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
				return;
			pdev->packet_loss_count[cat]++;
		}
		return;
	}

	/* since we may switch the ping-pong index, provide mutex w. readers */
	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = pdev->tx_delay.cats[cat].in_progress_idx;

	data = &pdev->tx_delay.cats[cat].copies[index];

	if (pdev->tx_delay.tx_compl_timestamp_ticks != 0) {
		tx_delay_transmit_ticks =
			now_ticks - pdev->tx_delay.tx_compl_timestamp_ticks;
		/*
		 * We'd like to account for the number of MSDUs that were
		 * transmitted together, but we don't know this. All we know
		 * is the number of MSDUs that were acked together.
		 * Since the frame error rate is small, this is nearly the same
		 * as the number of frames transmitted together.
		 */
		data->avgs.transmit_sum_ticks += tx_delay_transmit_ticks;
		data->avgs.transmit_num += num_msdus;
	}
	pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks;

	for (i = 0; i < num_msdus; i++) {
		int bin;
		uint16_t id = desc_ids[i];
		struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id);

		tx_delay_queue_ticks =
			now_ticks - tx_desc->entry_timestamp_ticks;

		data->avgs.queue_sum_ticks += tx_delay_queue_ticks;
		data->avgs.queue_num++;
		bin = ol_tx_delay_hist_bin(pdev, tx_delay_queue_ticks);
		data->hist_bins_queue[bin]++;
	}

	/* check if it's time to start a new average */
	avg_time_ticks =
		now_ticks - pdev->tx_delay.cats[cat].avg_start_time_ticks;
	if (avg_time_ticks > pdev->tx_delay.avg_period_ticks) {
		pdev->tx_delay.cats[cat].avg_start_time_ticks = now_ticks;
		index = 1 - index;
		pdev->tx_delay.cats[cat].in_progress_idx = index;
		qdf_mem_zero(&pdev->tx_delay.cats[cat].copies[index],
			     sizeof(pdev->tx_delay.cats[cat].copies[index]));
	}

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

#endif /* QCA_COMPUTE_TX_DELAY */