/*
 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <qdf_atomic.h> /* qdf_atomic_inc, etc. */
#include <qdf_lock.h> /* qdf_os_spinlock */
#include <qdf_time.h> /* qdf_system_ticks, etc. */
#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <qdf_net_types.h> /* QDF_NBUF_TX_EXT_TID_INVALID */

#include <cds_queue.h> /* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <linux/ieee80211.h> /* ieee80211_frame, etc. */
#include <enet.h> /* ethernet_hdr_t, etc. */
#include <ipv6_defs.h> /* ipv6_traffic_class */
#endif

#include <ol_txrx_api.h> /* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h> /* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h> /* htt_tx_status */

#include <ol_ctrl_txrx_api.h>
#include <cdp_txrx_tx_delay.h>
#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc */
#include <ol_tx_desc.h> /* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ol_tx_classify.h> /* ol_tx_dest_addr_find */
#endif
#include <ol_txrx_internal.h> /* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h> /* ol_tx_reinject */
#include <ol_tx_send.h>

#include <ol_cfg.h> /* ol_cfg_is_high_latency */
#include <ol_tx_sched.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h> /* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_tx_queue.h>
#include <ol_txrx.h>
#include <pktlog_ac_fmt.h>
#include <cdp_txrx_handle.h>

#ifdef TX_CREDIT_RECLAIM_SUPPORT

#define OL_TX_CREDIT_RECLAIM(pdev) \
	do { \
		if (qdf_atomic_read(&pdev->target_tx_credit) < \
		    ol_cfg_tx_credit_lwm(pdev->ctrl_pdev)) { \
			ol_osif_ath_tasklet(pdev->osdev); \
		} \
	} while (0)

#else

#define OL_TX_CREDIT_RECLAIM(pdev)

#endif /* TX_CREDIT_RECLAIM_SUPPORT */

#if defined(CONFIG_HL_SUPPORT) || defined(TX_CREDIT_RECLAIM_SUPPORT)

/*
 * HL needs to keep track of the amount of credit available to download
 * tx frames to the target - the download scheduler decides when to
 * download frames, and which frames to download, based on the credit
 * availability.
 * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
 * of the target_tx_credit, to determine when to poll for tx completion
 * messages.
 */
static inline void
ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
	qdf_atomic_add(-1 * delta, &pdev->target_tx_credit);
}

static inline void
ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
	qdf_atomic_add(delta, &pdev->target_tx_credit);
}
#else

static inline void
ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
}

static inline void
ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
}
#endif

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
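/**
 * ol_txrx_flow_control_cb() - invoke the registered osif flow control callback
 * @pvdev: vdev handle (struct cdp_vdev)
 * @tx_resume: true to resume the OS tx queue, false to pause it
 *
 * Calls the osif flow-control callback registered for this vdev, if any,
 * while holding the vdev flow_control_lock.
 *
 * Return: None
 */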
void ol_txrx_flow_control_cb(struct cdp_vdev *pvdev, bool tx_resume)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	qdf_spin_lock_bh(&vdev->flow_control_lock);
	if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
		vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
	qdf_spin_unlock_bh(&vdev->flow_control_lock);

	return;
}

/**
 * ol_txrx_flow_control_is_pause() - is osif paused by flow control
 * @vdev: vdev handle
 *
 * Return: true if osif is paused by flow control
 */
static bool ol_txrx_flow_control_is_pause(ol_txrx_vdev_handle vdev)
{
	bool is_pause = false;
	if ((vdev->osif_flow_control_is_pause) && (vdev->osif_fc_ctx))
		is_pause = vdev->osif_flow_control_is_pause(vdev->osif_fc_ctx);

	return is_pause;
}

/**
 * ol_tx_flow_ct_unpause_os_q() - Unpause OS Q
 * @pdev: physical device object
 *
 * Return: None
 */
static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
	struct ol_txrx_vdev_t *vdev;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if ((qdf_atomic_read(&vdev->os_q_paused) &&
		     (vdev->tx_fl_hwm != 0)) ||
		    ol_txrx_flow_control_is_pause(vdev)) {
			qdf_spin_lock(&pdev->tx_mutex);
			if (pdev->tx_desc.num_free > vdev->tx_fl_hwm) {
				qdf_atomic_set(&vdev->os_q_paused, 0);
				qdf_spin_unlock(&pdev->tx_mutex);
				ol_txrx_flow_control_cb((struct cdp_vdev *)vdev,
							true);
			} else {
				qdf_spin_unlock(&pdev->tx_mutex);
			}
		}
	}
}
#elif defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)

static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
	struct ol_txrx_vdev_t *vdev;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (qdf_atomic_read(&vdev->os_q_paused) &&
		    (vdev->tx_fl_hwm != 0)) {
			qdf_spin_lock(&pdev->tx_mutex);
			if (((ol_tx_desc_pool_size_hl(
					vdev->pdev->ctrl_pdev) >> 1)
				- TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED)
				- qdf_atomic_read(&vdev->tx_desc_count)
				> vdev->tx_fl_hwm) {
				qdf_atomic_set(&vdev->os_q_paused, 0);
				qdf_spin_unlock(&pdev->tx_mutex);
				vdev->osif_flow_control_cb(vdev, true);
			} else {
				qdf_spin_unlock(&pdev->tx_mutex);
			}
		}
	}
}
#else

static inline void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline uint16_t
ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
		struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
{
	int msdu_credit_consumed;

	TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", qdf_nbuf_len(msdu));
	TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      qdf_atomic_read(&pdev->target_tx_credit) - 1,
			      qdf_nbuf_len(msdu));

	msdu_credit_consumed = htt_tx_msdu_credit(msdu);
	ol_tx_target_credit_decr_int(pdev, msdu_credit_consumed);
	OL_TX_CREDIT_RECLAIM(pdev);

	/*
	 * When the tx frame is downloaded to the target, there are two
	 * outstanding references:
	 * 1. The host download SW (HTT, HTC, HIF)
	 *    This reference is cleared by the ol_tx_send_done callback
	 *    functions.
	 * 2. The target FW
	 *    This reference is cleared by the ol_tx_completion_handler
	 *    function.
	 * It is extremely probable that the download completion is processed
	 * before the tx completion message. However, under exceptional
	 * conditions the tx completion may be processed first. Thus, rather
	 * than assuming that reference (1) is done before reference (2),
	 * explicit reference tracking is needed.
	 * Double-increment the ref count to account for both references
	 * described above.
	 */

	OL_TX_DESC_REF_INIT(tx_desc);
	OL_TX_DESC_REF_INC(tx_desc);
	OL_TX_DESC_REF_INC(tx_desc);

	return msdu_credit_consumed;
}

void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
	   struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu, uint8_t vdev_id)
{
	int msdu_credit_consumed;
	uint16_t id;
	int failed;

	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
	id = ol_tx_desc_id(pdev, tx_desc);
	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
	DPTRACE(qdf_dp_trace_ptr(msdu, QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(msdu),
				 sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
				 vdev_id));
	failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
	if (qdf_unlikely(failed)) {
		ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
	}
}

void
ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
		 qdf_nbuf_t head_msdu, int num_msdus)
{
	qdf_nbuf_t rejected;

	OL_TX_CREDIT_RECLAIM(pdev);

	rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
	while (qdf_unlikely(rejected)) {
		struct ol_tx_desc_t *tx_desc;
		uint16_t *msdu_id_storage;
		qdf_nbuf_t next;

		next = qdf_nbuf_next(rejected);
		msdu_id_storage = ol_tx_msdu_id_storage(rejected);
		tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);

		ol_tx_target_credit_incr(pdev, rejected);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);

		rejected = next;
	}
}

void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
		  struct ol_tx_desc_t *tx_desc,
		  qdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
{
	int msdu_credit_consumed;
	uint16_t id;
	int failed;

	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
	id = ol_tx_desc_id(pdev, tx_desc);
	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
	failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
	if (failed) {
		ol_txrx_err(
			"Error: freeing tx frame after htt_tx failed");
		ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
	}
}

static inline void
ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
			 A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_find(pdev, msdu_id);
	qdf_assert(tx_desc);

	/*
	 * If the download is done for a management frame,
	 * call the download callback if registered.
	 */
	if (tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
		int tx_mgmt_index = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
		ol_txrx_mgmt_tx_cb download_cb =
			pdev->tx_mgmt.callbacks[tx_mgmt_index].download_cb;

		if (download_cb) {
			download_cb(pdev->tx_mgmt.callbacks[tx_mgmt_index].ctxt,
				    tx_desc->netbuf, status != A_OK);
		}
	}

	if (status != A_OK) {
		ol_tx_target_credit_incr(pdev, msdu);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     1 /* download err */);
	} else {
		if (OL_TX_DESC_NO_REFS(tx_desc)) {
			/*
			 * The decremented value was zero - free the frame.
			 * Use the tx status recorded previously during
			 * tx completion handling.
			 */
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
						     tx_desc->status !=
						     htt_tx_status_ok);
		}
	}
}

void
ol_tx_download_done_ll(void *pdev,
		       A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
				 msdu_id);
}

void
ol_tx_download_done_hl_retain(void *txrx_pdev,
			      A_STATUS status,
			      qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_txrx_pdev_t *pdev = txrx_pdev;

	ol_tx_download_done_base(pdev, status, msdu, msdu_id);
}

void
ol_tx_download_done_hl_free(void *txrx_pdev,
			    A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_txrx_pdev_t *pdev = txrx_pdev;
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_find(pdev, msdu_id);
	qdf_assert(tx_desc);

	ol_tx_download_done_base(pdev, status, msdu, msdu_id);

	if ((tx_desc->pkt_type != OL_TX_FRM_NO_FREE) &&
	    (tx_desc->pkt_type < OL_TXRX_MGMT_TYPE_BASE)) {
		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, status != A_OK);
	}
}

void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
	qdf_atomic_add(credit_delta, &pdev->orig_target_tx_credit);
}

void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      credit_delta,
			      qdf_atomic_read(&pdev->target_tx_credit) +
			      credit_delta);
	qdf_atomic_add(credit_delta, &pdev->target_tx_credit);
}

#ifdef QCA_COMPUTE_TX_DELAY

static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus);

#else
static inline void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus)
{
}
#endif /* QCA_COMPUTE_TX_DELAY */

#ifndef OL_TX_RESTORE_HDR
#define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
#endif
/*
 * The following macros could have been inline functions too.
 * The only rationale for choosing macros is to force the compiler to inline
 * the implementation, which cannot be controlled for actual "inline" functions,
 * since "inline" is only a hint to the compiler.
 * In the performance path, we choose to force the inlining, in preference to
 * the type-checking offered by actual inlined functions.
 */
#define ol_tx_msdu_complete_batch(_pdev, _tx_desc, _tx_descs, _status) \
	TAILQ_INSERT_TAIL(&(_tx_descs), (_tx_desc), tx_desc_list_elem)
#ifndef ATH_11AC_TXCOMPACT
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,\
				   _lcl_freelist, _tx_desc_last) \
	do { \
		qdf_atomic_init(&(_tx_desc)->ref_cnt); \
		/* restore original hdr offset */ \
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
		qdf_nbuf_free((_netbuf)); \
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
			(_lcl_freelist); \
		if (qdf_unlikely(!lcl_freelist)) { \
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
				(_tx_desc); \
		} \
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)
#else /*!ATH_11AC_TXCOMPACT */
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,\
				   _lcl_freelist, _tx_desc_last) \
	do { \
		/* restore original hdr offset */ \
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
		qdf_nbuf_free((_netbuf)); \
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
			(_lcl_freelist); \
		if (qdf_unlikely(!lcl_freelist)) { \
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
				(_tx_desc); \
		} \
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)

#endif /*!ATH_11AC_TXCOMPACT */

#ifdef QCA_TX_SINGLE_COMPLETIONS
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
			    _netbuf, _lcl_freelist, \
			    _tx_desc_last, _status, is_tx_desc_freed) \
	{ \
		is_tx_desc_freed = 0; \
		ol_tx_msdu_complete_single((_pdev), (_tx_desc), \
					   (_netbuf), (_lcl_freelist), \
					   _tx_desc_last) \
	}
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
			    _netbuf, _lcl_freelist, \
			    _tx_desc_last, _status, is_tx_desc_freed) \
	do { \
		if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
			is_tx_desc_freed = 0; \
			ol_tx_msdu_complete_single((_pdev), (_tx_desc),\
						   (_netbuf), (_lcl_freelist), \
						   (_tx_desc_last)); \
		} else { \
			is_tx_desc_freed = 1; \
			ol_tx_desc_frame_free_nonstd( \
				(_pdev), (_tx_desc), \
				(_status) != htt_tx_status_ok); \
		} \
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#else /* !QCA_TX_SINGLE_COMPLETIONS */
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
			    _netbuf, _lcl_freelist, \
			    _tx_desc_last, _status, is_tx_desc_freed) \
	{ \
		is_tx_desc_freed = 0; \
		ol_tx_msdu_complete_batch((_pdev), (_tx_desc), \
					  (_tx_descs), (_status)) \
	}
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
			    _netbuf, _lcl_freelist, \
			    _tx_desc_last, _status, is_tx_desc_freed) \
	do { \
		if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
			is_tx_desc_freed = 0; \
			ol_tx_msdu_complete_batch((_pdev), (_tx_desc), \
						  (_tx_descs), (_status)); \
		} else { \
			is_tx_desc_freed = 1; \
			ol_tx_desc_frame_free_nonstd((_pdev), (_tx_desc), \
						     (_status) != \
						     htt_tx_status_ok); \
		} \
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#endif /* QCA_TX_SINGLE_COMPLETIONS */

#if !defined(CONFIG_HL_SUPPORT)
void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
{
	int i = 0;
	struct ol_tx_desc_t *tx_desc;
	int num_discarded = 0;

	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
		tx_desc = ol_tx_desc_find(pdev, i);
		/*
		 * Confirm that each tx descriptor is "empty", i.e. it has
		 * no tx frame attached.
		 * In particular, check that there are no frames that have
		 * been given to the target to transmit, for which the
		 * target has never provided a response.
		 */
		if (qdf_atomic_read(&tx_desc->ref_cnt)) {
			ol_txrx_dbg(
				"Warning: freeing tx desc %d", tx_desc->id);
			ol_tx_desc_frame_free_nonstd(pdev,
						     tx_desc, 1);
			num_discarded++;
		}
	}

	if (num_discarded)
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "Warning: freed %d tx descs for which no tx completion rcvd from the target",
			  num_discarded);
}
#endif

void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
{
	ol_tx_target_credit_update(pdev, credits);

	if (pdev->cfg.is_high_latency)
		ol_tx_sched(pdev);

	/* UNPAUSE OS Q */
	ol_tx_flow_ct_unpause_os_q(pdev);
}

#ifdef WLAN_FEATURE_TSF_PLUS
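/**
 * ol_tx_get_txtstamps() - locate the tx timestamp list in a completion msg
 * @msg_word: pointer to the HTT tx completion indication message
 * @num_msdus: number of MSDUs reported in the completion indication
 *
 * The tx timestamp records, when present, are appended to the completion
 * indication after the MSDU ID array and the optional retry list, so this
 * helper walks past those fields to find them.
 *
 * Return: pointer to the appended timestamp list, or NULL if absent
 */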
static inline struct htt_tx_compl_ind_append_tx_tstamp *ol_tx_get_txtstamps(
		u_int32_t *msg_word, int num_msdus)
{
	u_int32_t has_tx_tsf;
	u_int32_t has_retry;
	struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL;
	struct htt_tx_compl_ind_append_retries *retry_list = NULL;
	int offset_dwords;

	has_tx_tsf = HTT_TX_COMPL_IND_APPEND1_GET(*msg_word);
	if (num_msdus <= 0 || !has_tx_tsf)
		return NULL;

	offset_dwords = 1 + ((num_msdus + 1) >> 1);

	has_retry = HTT_TX_COMPL_IND_APPEND_GET(*msg_word);
	if (has_retry) {
		int retry_index = 0;
		int width_for_each_retry =
			(sizeof(struct htt_tx_compl_ind_append_retries) +
			 3) >> 2;

		retry_list = (struct htt_tx_compl_ind_append_retries *)
			(msg_word + offset_dwords);
		while (retry_list) {
			if (retry_list[retry_index++].flag == 0)
				break;
		}
		offset_dwords += retry_index * width_for_each_retry;
	}
	txtstamp_list = (struct htt_tx_compl_ind_append_tx_tstamp *)
		(msg_word + offset_dwords);

	return txtstamp_list;
}

static inline void ol_tx_timestamp(ol_txrx_pdev_handle pdev,
				   qdf_nbuf_t netbuf, u_int64_t ts)
{
	if (!netbuf)
		return;

	if (pdev->ol_tx_timestamp_cb)
		pdev->ol_tx_timestamp_cb(netbuf, ts);
}
#else
static inline struct htt_tx_compl_ind_append_tx_tstamp *ol_tx_get_txtstamps(
		u_int32_t *msg_word, int num_msdus)
{
	return NULL;
}

static inline void ol_tx_timestamp(ol_txrx_pdev_handle pdev,
				   qdf_nbuf_t netbuf, u_int64_t ts)
{
}
#endif

/**
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored in
 * ol_tx_inspect_handler().
 */
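/**
 * ol_tx_completion_handler() - process a batch of tx completions from target
 * @pdev: txrx physical device object
 * @num_msdus: number of MSDUs reported in this completion indication
 * @status: tx completion status shared by all MSDUs in the batch
 * @msg: the HTT tx completion indication message
 *
 * For each completed MSDU this releases the target's reference on its tx
 * descriptor, invokes the registered packet-dump and timestamp callbacks,
 * updates credit and byte statistics, and returns the descriptor to the
 * free list; it then replenishes tx credit and unpauses the OS queues.
 *
 * Return: None
 */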
void
ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
			 int num_msdus,
			 enum htt_tx_status status, void *msg)
{
	int i;
	uint16_t tx_desc_id;
	struct ol_tx_desc_t *tx_desc;
	uint32_t byte_cnt = 0;
	qdf_nbuf_t netbuf;
	tp_ol_packetdump_cb packetdump_cb;
	uint32_t is_tx_desc_freed = 0;
	struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL;
	u_int32_t *msg_word = (u_int32_t *)msg;
	u_int16_t *desc_ids = (u_int16_t *)(msg_word + 1);

	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
	ol_tx_desc_list tx_descs;
	TAILQ_INIT(&tx_descs);

	ol_tx_delay_compute(pdev, status, desc_ids, num_msdus);
	if (status == htt_tx_status_ok)
		txtstamp_list = ol_tx_get_txtstamps(msg_word, num_msdus);

	for (i = 0; i < num_msdus; i++) {
		tx_desc_id = desc_ids[i];
		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
		tx_desc->status = status;
		netbuf = tx_desc->netbuf;

		if (txtstamp_list)
			ol_tx_timestamp(pdev, netbuf,
					(u_int64_t)txtstamp_list->timestamp[i]
					);

		QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);

		if (tx_desc->pkt_type != OL_TX_FRM_TSO) {
			packetdump_cb = pdev->ol_tx_packetdump_cb;
			if (packetdump_cb)
				packetdump_cb(netbuf, status,
					      tx_desc->vdev->vdev_id,
					      TX_DATA_PKT);
		}

		DPTRACE(qdf_dp_trace_ptr(netbuf,
					 QDF_DP_TRACE_FREE_PACKET_PTR_RECORD,
					 QDF_TRACE_DEFAULT_PDEV_ID,
					 qdf_nbuf_data_addr(netbuf),
					 sizeof(qdf_nbuf_data(netbuf)),
					 tx_desc->id, status));
		htc_pm_runtime_put(pdev->htt_pdev->htc_pdev);
		ol_tx_desc_update_group_credit(pdev, tx_desc_id, 1, 0, status);
		/* Per SDU update of byte count */
		byte_cnt += qdf_nbuf_len(netbuf);
		if (OL_TX_DESC_NO_REFS(tx_desc)) {
			ol_tx_statistics(
				pdev->ctrl_pdev,
				HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
							  (tx_desc->
							   htt_tx_desc))),
				status != htt_tx_status_ok);
			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
					    lcl_freelist, tx_desc_last, status,
					    is_tx_desc_freed);

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
			if (!is_tx_desc_freed) {
				tx_desc->pkt_type = ol_tx_frm_freed;
#ifdef QCA_COMPUTE_TX_DELAY
				tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
			}
#endif
		}
	}

	/* One shot protected access to pdev freelist, when setup */
	if (lcl_freelist) {
		qdf_spin_lock(&pdev->tx_mutex);
		tx_desc_last->next = pdev->tx_desc.freelist;
		pdev->tx_desc.freelist = lcl_freelist;
		pdev->tx_desc.num_free += (uint16_t) num_msdus;
		qdf_spin_unlock(&pdev->tx_mutex);
	} else {
		ol_tx_desc_frame_list_free(pdev, &tx_descs,
					   status != htt_tx_status_ok);
	}

	if (pdev->cfg.is_high_latency) {
		/*
		 * Credit was already explicitly updated by HTT,
		 * but update the number of available tx descriptors,
		 * then invoke the scheduler, since new credit is probably
		 * available now.
		 */
		qdf_atomic_add(num_msdus, &pdev->tx_queue.rsrc_cnt);
		ol_tx_sched(pdev);
	} else {
		ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
	}

	/* UNPAUSE OS Q */
	ol_tx_flow_ct_unpause_os_q(pdev);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
}

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

void ol_tx_desc_update_group_credit(ol_txrx_pdev_handle pdev,
		u_int16_t tx_desc_id, int credit, u_int8_t absolute,
		enum htt_tx_status status)
{
	uint8_t i, is_member;
	uint16_t vdev_id_mask;
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
		vdev_id_mask =
			OL_TXQ_GROUP_VDEV_ID_MASK_GET(
					pdev->txq_grps[i].membership);
		is_member = OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_id_mask,
							      tx_desc->vdev_id);
		if (is_member) {
			ol_txrx_update_group_credit(&pdev->txq_grps[i],
						    credit, absolute);
			break;
		}
	}
	ol_tx_update_group_credit_stats(pdev);
}

#ifdef DEBUG_HL_LOGGING

void ol_tx_update_group_credit_stats(ol_txrx_pdev_handle pdev)
{
	uint16_t curr_index;
	uint8_t i;

	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
	pdev->grp_stats.last_valid_index++;
	if (pdev->grp_stats.last_valid_index > (OL_TX_GROUP_STATS_LOG_SIZE
				- 1)) {
		pdev->grp_stats.last_valid_index -= OL_TX_GROUP_STATS_LOG_SIZE;
		pdev->grp_stats.wrap_around = 1;
	}
	curr_index = pdev->grp_stats.last_valid_index;

	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
		pdev->grp_stats.stats[curr_index].grp[i].member_vdevs =
			OL_TXQ_GROUP_VDEV_ID_MASK_GET(
					pdev->txq_grps[i].membership);
		pdev->grp_stats.stats[curr_index].grp[i].credit =
			qdf_atomic_read(&pdev->txq_grps[i].credit);
	}

	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
}

void ol_tx_dump_group_credit_stats(ol_txrx_pdev_handle pdev)
{
	uint16_t i, j, is_break = 0;
	int16_t curr_index, old_index, wrap_around;
	uint16_t curr_credit, old_credit, mem_vdevs;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "Group credit stats:");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  " No: GrpID: Credit: Change: vdev_map");

	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
	curr_index = pdev->grp_stats.last_valid_index;
	wrap_around = pdev->grp_stats.wrap_around;
	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);

	if (curr_index < 0) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Not initialized");
		return;
	}

	for (i = 0; i < OL_TX_GROUP_STATS_LOG_SIZE; i++) {
		old_index = curr_index - 1;
		if (old_index < 0) {
			if (wrap_around == 0)
				is_break = 1;
			else
				old_index = OL_TX_GROUP_STATS_LOG_SIZE - 1;
		}

		for (j = 0; j < OL_TX_MAX_TXQ_GROUPS; j++) {
			qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
			curr_credit =
				pdev->grp_stats.stats[curr_index].
					grp[j].credit;
			if (!is_break)
				old_credit =
					pdev->grp_stats.stats[old_index].
						grp[j].credit;

			mem_vdevs =
				pdev->grp_stats.stats[curr_index].grp[j].
					member_vdevs;
			qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);

			if (!is_break)
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%4d: %5d: %6d %6d %8x",
					  curr_index, j,
					  curr_credit,
					  (curr_credit - old_credit),
					  mem_vdevs);
			else
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%4d: %5d: %6d %6s %8x",
					  curr_index, j,
					  curr_credit, "NA", mem_vdevs);
		}

		if (is_break)
			break;

		curr_index = old_index;
	}
}

void ol_tx_clear_group_credit_stats(ol_txrx_pdev_handle pdev)
{
	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
	qdf_mem_zero(&pdev->grp_stats, sizeof(pdev->grp_stats));
	pdev->grp_stats.last_valid_index = -1;
	pdev->grp_stats.wrap_around = 0;
	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
}
#endif
#endif

/*
 * ol_tx_single_completion_handler performs the same tx completion
 * processing as ol_tx_completion_handler, but for a single frame.
 * ol_tx_completion_handler is optimized to handle batch completions
 * as efficiently as possible; in contrast ol_tx_single_completion_handler
 * handles single frames as simply and generally as possible.
 * Thus, this ol_tx_single_completion_handler function is suitable for
 * intermittent usage, such as for tx mgmt frames.
 */
void
ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
				enum htt_tx_status status, uint16_t tx_desc_id)
{
	struct ol_tx_desc_t *tx_desc;
	qdf_nbuf_t netbuf;
	tp_ol_packetdump_cb packetdump_cb;

	tx_desc = ol_tx_desc_find_check(pdev, tx_desc_id);
	if (tx_desc == NULL) {
		ol_txrx_err(
			"%s: invalid desc_id(%u), ignore it.\n",
			__func__,
			tx_desc_id);
		return;
	}

	tx_desc->status = status;
	netbuf = tx_desc->netbuf;

	QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, qdf_nbuf_len(netbuf));

	packetdump_cb = pdev->ol_tx_packetdump_cb;
	if (packetdump_cb)
		packetdump_cb(netbuf, status,
			      tx_desc->vdev->vdev_id, TX_MGMT_PKT);

	if (OL_TX_DESC_NO_REFS(tx_desc)) {
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     status != htt_tx_status_ok);
	}

	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      1, qdf_atomic_read(&pdev->target_tx_credit) + 1);

	if (pdev->cfg.is_high_latency) {
		/*
		 * Credit was already explicitly updated by HTT,
		 * but update the number of available tx descriptors,
		 * then invoke the scheduler, since new credit is probably
		 * available now.
		 */
		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
		ol_tx_sched(pdev);
	} else {
		qdf_atomic_add(1, &pdev->target_tx_credit);
	}
}

/**
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored here.
 */
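/**
 * ol_tx_inspect_handler() - free tx descriptors for frames returned by target
 * @pdev: txrx physical device object
 * @num_msdus: number of tx descriptor IDs in the iterator
 * @tx_desc_id_iterator: list of tx descriptor IDs
 *
 * Releases the host reference on each listed tx descriptor, freeing the
 * frame with a forced "htt_tx_status_ok" status, and then replenishes the
 * tx credit for the returned frames.
 *
 * Return: None
 */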
void
ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
		      int num_msdus, void *tx_desc_id_iterator)
{
	uint16_t vdev_id, i;
	struct ol_txrx_vdev_t *vdev;
	uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
	uint16_t tx_desc_id;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
	qdf_nbuf_t netbuf;
	ol_tx_desc_list tx_descs;
	uint32_t is_tx_desc_freed = 0;

	TAILQ_INIT(&tx_descs);

	for (i = 0; i < num_msdus; i++) {
		tx_desc_id = desc_ids[i];
		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
		netbuf = tx_desc->netbuf;

		/* find the "vdev" this tx_desc belongs to */
		vdev_id = HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
						    (tx_desc->htt_tx_desc)));
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->vdev_id == vdev_id)
				break;
		}

		/* vdev now points to the vdev for this descriptor. */

#ifndef ATH_11AC_TXCOMPACT
		/* save this multicast packet to local free list */
		if (qdf_atomic_dec_and_test(&tx_desc->ref_cnt))
#endif
		{
			/*
			 * For this function only, force htt status to be
			 * "htt_tx_status_ok"
			 * for graceful freeing of this multicast frame
			 */
			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
					    lcl_freelist, tx_desc_last,
					    htt_tx_status_ok,
					    is_tx_desc_freed);

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
			if (!is_tx_desc_freed) {
				tx_desc->pkt_type = ol_tx_frm_freed;
#ifdef QCA_COMPUTE_TX_DELAY
				tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
			}
#endif
		}
	}

	if (lcl_freelist) {
		qdf_spin_lock(&pdev->tx_mutex);
		tx_desc_last->next = pdev->tx_desc.freelist;
		pdev->tx_desc.freelist = lcl_freelist;
		qdf_spin_unlock(&pdev->tx_mutex);
	} else {
		ol_tx_desc_frame_list_free(pdev, &tx_descs,
					   htt_tx_status_discard);
	}
	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase HTT credit %d + %d = %d..\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      num_msdus,
			      qdf_atomic_read(&pdev->target_tx_credit) +
			      num_msdus);

	if (pdev->cfg.is_high_latency) {
		/* credit was already explicitly updated by HTT */
		ol_tx_sched(pdev);
	} else {
		ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
	}
}

#ifdef QCA_COMPUTE_TX_DELAY
/**
 * @brief updates the compute interval period for TSM stats.
 * @details
 * @param interval - interval for stats computation
 */
void ol_tx_set_compute_interval(struct cdp_pdev *ppdev, uint32_t interval)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(interval);
}

/**
 * @brief Return the uplink (transmitted) packet count and loss count.
 * @details
 * This function will be called to get the uplink packet count and
 * loss count for a given stream (access category) at a regular interval.
 * It also resets the counters; hence, the values returned cover the packets
 * counted in the last 5-second (default) interval. These counters are
 * incremented per access category in ol_tx_completion_handler().
 *
 * @param category - access category of interest
 * @param out_packet_count - number of packets transmitted
 * @param out_packet_loss_count - number of packets lost
 */
void
ol_tx_packet_count(struct cdp_pdev *ppdev,
		   uint16_t *out_packet_count,
		   uint16_t *out_packet_loss_count, int category)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	*out_packet_count = pdev->packet_count[category];
	*out_packet_loss_count = pdev->packet_loss_count[category];
	pdev->packet_count[category] = 0;
	pdev->packet_loss_count[category] = 0;
}

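/*
 * ol_tx_delay_avg() - compute sum/num, rounded to the nearest integer,
 * without a 64-bit division: both operands are shifted down until the sum
 * fits in 32 bits.  For example, sum = 0x100000000 and num = 16 become
 * 0x80000000 and 8 after one shift, still giving an average of 0x10000000.
 */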
static uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
{
	uint32_t sum32;
	int shift = 0;
	/*
	 * To avoid doing a 64-bit divide, shift the sum down until it is
	 * no more than 32 bits (and shift the denominator to match).
	 */
	while ((sum >> 32) != 0) {
		sum >>= 1;
		shift++;
	}
	sum32 = (uint32_t) sum;
	num >>= shift;
	return (sum32 + (num >> 1)) / num; /* round to nearest */
}

void
ol_tx_delay(struct cdp_pdev *ppdev,
	    uint32_t *queue_delay_microsec,
	    uint32_t *tx_delay_microsec, int category)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	int index;
	uint32_t avg_delay_ticks;
	struct ol_tx_delay_data *data;

	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	if (data->avgs.transmit_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.transmit_sum_ticks,
					data->avgs.transmit_num);
		*tx_delay_microsec =
			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*tx_delay_microsec = 0;
	}
	if (data->avgs.queue_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.queue_sum_ticks,
					data->avgs.queue_num);
		*queue_delay_microsec =
			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*queue_delay_microsec = 0;
	}

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

void
ol_tx_delay_hist(struct cdp_pdev *ppdev,
		 uint16_t *report_bin_values, int category)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	int index, i, j;
	struct ol_tx_delay_data *data;

	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	for (i = 0, j = 0; i < QCA_TX_DELAY_HIST_REPORT_BINS - 1; i++) {
		uint16_t internal_bin_sum = 0;

		while (j < (1 << i))
			internal_bin_sum += data->hist_bins_queue[j++];

		report_bin_values[i] = internal_bin_sum;
	}
	report_bin_values[i] = data->hist_bins_queue[j]; /* overflow */

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

#ifdef QCA_COMPUTE_TX_DELAY_PER_TID

static uint8_t
ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
			    qdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
{
	uint16_t ethertype;
	uint8_t *dest_addr, *l3_hdr;
	int is_mgmt, is_mcast;
	int l2_hdr_size;

	dest_addr = ol_tx_dest_addr_find(pdev, msdu);
	if (NULL == dest_addr)
		return QDF_NBUF_TX_EXT_TID_INVALID;

	is_mcast = IEEE80211_IS_MULTICAST(dest_addr);
	is_mgmt = tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE;
	if (is_mgmt) {
		return (is_mcast) ?
		       OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT :
		       HTT_TX_EXT_TID_MGMT;
	}
	if (is_mcast)
		return OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST;

	if (pdev->frame_format == wlan_frm_fmt_802_3) {
		struct ethernet_hdr_t *enet_hdr;

		enet_hdr = (struct ethernet_hdr_t *)qdf_nbuf_data(msdu);
		l2_hdr_size = sizeof(struct ethernet_hdr_t);
		ethertype =
			(enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1];
		if (!IS_ETHERTYPE(ethertype)) {
			struct llc_snap_hdr_t *llc_hdr;

			llc_hdr = (struct llc_snap_hdr_t *)
				  (qdf_nbuf_data(msdu) + l2_hdr_size);
			l2_hdr_size += sizeof(struct llc_snap_hdr_t);
			ethertype =
				(llc_hdr->ethertype[0] << 8) | llc_hdr->
								ethertype[1];
		}
	} else {
		struct llc_snap_hdr_t *llc_hdr;

		l2_hdr_size = sizeof(struct ieee80211_frame);
		llc_hdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(msdu)
						    + l2_hdr_size);
		l2_hdr_size += sizeof(struct llc_snap_hdr_t);
		ethertype =
			(llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
	}
	l3_hdr = qdf_nbuf_data(msdu) + l2_hdr_size;
	if (ETHERTYPE_IPV4 == ethertype) {
		return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7;
	} else if (ETHERTYPE_IPV6 == ethertype) {
		return (ipv6_traffic_class((struct ipv6_hdr_t *)l3_hdr) >> 5) &
		       0x7;
	} else {
		return QDF_NBUF_TX_EXT_TID_INVALID;
	}
}
#endif

static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
{
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
	struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
	uint8_t tid;
	qdf_nbuf_t msdu = tx_desc->netbuf;

	tid = qdf_nbuf_get_tid(msdu);
	if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
		tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc);
		if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
			/*
			 * TID could not be determined
			 * (this is not an IP frame?)
			 */
			return -EINVAL;
		}
	}
	return tid;
#else
	return 0;
#endif
}

static inline int
ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks)
{
	int bin;
	/*
	 * For speed, multiply and shift to approximate a divide. This causes
	 * a small error, but the approximation error should be much less
	 * than the other uncertainties in the tx delay computation.
	 */
	bin = (delay_ticks * pdev->tx_delay.hist_internal_bin_width_mult) >>
	      pdev->tx_delay.hist_internal_bin_width_shift;
	if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
		bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;

	return bin;
}

static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus)
{
	int i, index, cat;
	uint32_t now_ticks = qdf_system_ticks();
	uint32_t tx_delay_transmit_ticks, tx_delay_queue_ticks;
	uint32_t avg_time_ticks;
	struct ol_tx_delay_data *data;

	qdf_assert(num_msdus > 0);

	/*
	 * keep static counters for total packet and lost packets
	 * reset them in ol_tx_delay(), function used to fetch the stats
	 */

	cat = ol_tx_delay_category(pdev, desc_ids[0]);
	if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
		return;

	pdev->packet_count[cat] = pdev->packet_count[cat] + num_msdus;
	if (status != htt_tx_status_ok) {
		for (i = 0; i < num_msdus; i++) {
			cat = ol_tx_delay_category(pdev, desc_ids[i]);
			if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
				return;
			pdev->packet_loss_count[cat]++;
		}
		return;
	}

	/* since we may switch the ping-pong index, provide mutex w. readers */
	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = pdev->tx_delay.cats[cat].in_progress_idx;

	data = &pdev->tx_delay.cats[cat].copies[index];

	if (pdev->tx_delay.tx_compl_timestamp_ticks != 0) {
		tx_delay_transmit_ticks =
			now_ticks - pdev->tx_delay.tx_compl_timestamp_ticks;
		/*
		 * We'd like to account for the number of MSDUs that were
		 * transmitted together, but we don't know this. All we know
		 * is the number of MSDUs that were acked together.
		 * Since the frame error rate is small, this is nearly the same
		 * as the number of frames transmitted together.
		 */
		data->avgs.transmit_sum_ticks += tx_delay_transmit_ticks;
		data->avgs.transmit_num += num_msdus;
	}
	pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks;

	for (i = 0; i < num_msdus; i++) {
		int bin;
		uint16_t id = desc_ids[i];
		struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id);

		tx_delay_queue_ticks =
			now_ticks - tx_desc->entry_timestamp_ticks;

		data->avgs.queue_sum_ticks += tx_delay_queue_ticks;
		data->avgs.queue_num++;
		bin = ol_tx_delay_hist_bin(pdev, tx_delay_queue_ticks);
		data->hist_bins_queue[bin]++;
	}

	/* check if it's time to start a new average */
	avg_time_ticks =
		now_ticks - pdev->tx_delay.cats[cat].avg_start_time_ticks;
	if (avg_time_ticks > pdev->tx_delay.avg_period_ticks) {
		pdev->tx_delay.cats[cat].avg_start_time_ticks = now_ticks;
		index = 1 - index;
		pdev->tx_delay.cats[cat].in_progress_idx = index;
		qdf_mem_zero(&pdev->tx_delay.cats[cat].copies[index],
			     sizeof(pdev->tx_delay.cats[cat].copies[index]));
	}

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

#endif /* QCA_COMPUTE_TX_DELAY */

/**
 * ol_register_packetdump_callback() - registers
 * tx data packet, tx mgmt. packet and rx data packet
 * dump callback handler.
 *
 * @ol_tx_packetdump_cb: tx packetdump cb
 * @ol_rx_packetdump_cb: rx packetdump cb
 *
 * This function is used to register the tx data pkt, tx mgmt.
 * pkt and rx data pkt dump callbacks.
 *
 * Return: None
 *
 */
void ol_register_packetdump_callback(tp_ol_packetdump_cb ol_tx_packetdump_cb,
				     tp_ol_packetdump_cb ol_rx_packetdump_cb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err(
			"%s: pdev is NULL", __func__);
		return;
	}

	pdev->ol_tx_packetdump_cb = ol_tx_packetdump_cb;
	pdev->ol_rx_packetdump_cb = ol_rx_packetdump_cb;
}

/**
 * ol_deregister_packetdump_callback() - deregisters
 * tx data packet, tx mgmt. packet and rx data packet
 * dump callback handler
 *
 * This function is used to deregister the tx data pkt,
 * tx mgmt. pkt and rx data pkt dump callbacks.
 *
 * Return: None
 *
 */
void ol_deregister_packetdump_callback(void)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err(
			"%s: pdev is NULL", __func__);
		return;
	}

	pdev->ol_tx_packetdump_cb = NULL;
	pdev->ol_rx_packetdump_cb = NULL;
}

#ifdef WLAN_FEATURE_TSF_PLUS
void ol_register_timestamp_callback(tp_ol_timestamp_cb ol_tx_timestamp_cb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err("%s: pdev is NULL", __func__);
		return;
	}
	pdev->ol_tx_timestamp_cb = ol_tx_timestamp_cb;
}

void ol_deregister_timestamp_callback(void)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		ol_txrx_err("%s: pdev is NULL", __func__);
		return;
	}
	pdev->ol_tx_timestamp_cb = NULL;
}
#endif