/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
#include <qdf_lock.h>           /* qdf_os_spinlock */
#include <qdf_time.h>           /* qdf_system_ticks, etc. */
#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */

#include <cds_queue.h>          /* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ieee80211.h>          /* ieee80211_frame, etc. */
#include <enet.h>               /* ethernet_hdr_t, etc. */
#include <ipv6_defs.h>          /* ipv6_traffic_class */
#endif

#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h>    /* htt_tx_status */

#include <ol_ctrl_txrx_api.h>
#include <cdp_txrx_tx_delay.h>
#include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc */
#include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ol_tx_classify.h>     /* ol_tx_dest_addr_find */
#endif
#include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h>              /* ol_tx_reinject */

#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_tx_sched.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_tx_queue.h>
#include <ol_txrx.h>

#ifdef TX_CREDIT_RECLAIM_SUPPORT

#define OL_TX_CREDIT_RECLAIM(pdev) \
        do { \
                if (qdf_atomic_read(&pdev->target_tx_credit) < \
                    ol_cfg_tx_credit_lwm(pdev->ctrl_pdev)) { \
                        ol_osif_ath_tasklet(pdev->osdev); \
                } \
        } while (0)

#else

#define OL_TX_CREDIT_RECLAIM(pdev)

#endif /* TX_CREDIT_RECLAIM_SUPPORT */

#if defined(CONFIG_HL_SUPPORT) || defined(TX_CREDIT_RECLAIM_SUPPORT)

/*
 * HL needs to keep track of the amount of credit available to download
 * tx frames to the target - the download scheduler decides when to
 * download frames, and which frames to download, based on the credit
 * availability.
 * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
 * of the target_tx_credit, to determine when to poll for tx completion
 * messages.
 */
static inline void
ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
        qdf_atomic_add(-1 * delta, &pdev->target_tx_credit);
}

static inline void
ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
        qdf_atomic_add(delta, &pdev->target_tx_credit);
}
#else

static inline void
ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
}

static inline void
ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
}
#endif

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
/**
 * ol_txrx_flow_control_cb() - call osif flow control callback
 * @pvdev: vdev handle
 * @tx_resume: tx resume flag
 *
 * Return: none
 */
void ol_txrx_flow_control_cb(void *pvdev, bool tx_resume)
{
        struct ol_txrx_vdev_t *vdev = pvdev;

        qdf_spin_lock_bh(&vdev->flow_control_lock);
        if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
                vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
        qdf_spin_unlock_bh(&vdev->flow_control_lock);
}

/**
 * ol_tx_flow_ct_unpause_os_q() - unpause OS queues that have enough tx
 *      descriptors available again
 * @pdev: physical device object
 *
 * Return: None
 */
static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
        struct ol_txrx_vdev_t *vdev;

        TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                if (qdf_atomic_read(&vdev->os_q_paused) &&
                    (vdev->tx_fl_hwm != 0)) {
                        qdf_spin_lock(&pdev->tx_mutex);
                        if (pdev->tx_desc.num_free > vdev->tx_fl_hwm) {
                                qdf_atomic_set(&vdev->os_q_paused, 0);
                                qdf_spin_unlock(&pdev->tx_mutex);
                                ol_txrx_flow_control_cb(vdev, true);
                        } else {
                                qdf_spin_unlock(&pdev->tx_mutex);
                        }
                }
        }
}
#elif defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)

static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
        struct ol_txrx_vdev_t *vdev;

        TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                if (qdf_atomic_read(&vdev->os_q_paused) &&
                    (vdev->tx_fl_hwm != 0)) {
                        qdf_spin_lock(&pdev->tx_mutex);
                        if (((ol_tx_desc_pool_size_hl(
                                        vdev->pdev->ctrl_pdev) >> 1)
                            - TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED)
                            - qdf_atomic_read(&vdev->tx_desc_count)
                            > vdev->tx_fl_hwm) {
                                qdf_atomic_set(&vdev->os_q_paused, 0);
                                qdf_spin_unlock(&pdev->tx_mutex);
                                vdev->osif_flow_control_cb(vdev, true);
                        } else {
                                qdf_spin_unlock(&pdev->tx_mutex);
                        }
                }
        }
}
#else

static inline void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

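/**
 * ol_tx_send_base() - common bookkeeping before handing a tx frame to HTT
 * @pdev: physical device object
 * @tx_desc: tx descriptor for the frame
 * @msdu: frame to be downloaded
 *
 * Decrement the target tx credit by the amount this MSDU consumes and take
 * the two descriptor references (host download path and target FW) that the
 * completion handlers will later release.
 *
 * Return: number of credits consumed by the MSDU
 */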
static inline uint16_t
ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
                struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
{
        int msdu_credit_consumed;

        TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", qdf_nbuf_len(msdu));
        TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
                              qdf_atomic_read(&pdev->target_tx_credit),
                              qdf_atomic_read(&pdev->target_tx_credit) - 1,
                              qdf_nbuf_len(msdu));

        msdu_credit_consumed = htt_tx_msdu_credit(msdu);
        ol_tx_target_credit_decr_int(pdev, msdu_credit_consumed);
        OL_TX_CREDIT_RECLAIM(pdev);

        /*
         * When the tx frame is downloaded to the target, there are two
         * outstanding references:
         * 1. The host download SW (HTT, HTC, HIF)
         *    This reference is cleared by the ol_tx_download_done callback
         *    functions.
         * 2. The target FW
         *    This reference is cleared by the ol_tx_completion_handler
         *    function.
         * It is extremely probable that the download completion is processed
         * before the tx completion message. However, under exceptional
         * conditions the tx completion may be processed first. Thus, rather
         * than assuming that reference (1) is done before reference (2),
         * explicit reference tracking is needed.
         * Double-increment the ref count to account for both references
         * described above.
         */
        OL_TX_DESC_REF_INIT(tx_desc);
        OL_TX_DESC_REF_INC(tx_desc);
        OL_TX_DESC_REF_INC(tx_desc);

        return msdu_credit_consumed;
}

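/**
 * ol_tx_send() - download a standard tx frame to the target
 * @pdev: physical device object
 * @tx_desc: tx descriptor for the frame
 * @msdu: frame to download
 * @vdev_id: ID of the vdev transmitting the frame
 *
 * If the HTT download fails, the consumed credit is returned and the
 * descriptor and frame are freed with an error status.
 */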
void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
           struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu, uint8_t vdev_id)
{
        int msdu_credit_consumed;
        uint16_t id;
        int failed;

        msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
        id = ol_tx_desc_id(pdev, tx_desc);
        QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
        DPTRACE(qdf_dp_trace_ptr(msdu, QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
                                 qdf_nbuf_data_addr(msdu),
                                 sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
                                 vdev_id));
        failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
        if (qdf_unlikely(failed)) {
                ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
        }
}

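/**
 * ol_tx_send_batch() - download a linked list of tx frames to the target
 * @pdev: physical device object
 * @head_msdu: head of the list of frames to download
 * @num_msdus: number of frames in the list
 *
 * Frames rejected by HTT are unlinked, their credit is returned, and the
 * corresponding descriptors are freed with an error status.
 */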
void
ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
                 qdf_nbuf_t head_msdu, int num_msdus)
{
        qdf_nbuf_t rejected;

        OL_TX_CREDIT_RECLAIM(pdev);

        rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
        while (qdf_unlikely(rejected)) {
                struct ol_tx_desc_t *tx_desc;
                uint16_t *msdu_id_storage;
                qdf_nbuf_t next;

                next = qdf_nbuf_next(rejected);
                msdu_id_storage = ol_tx_msdu_id_storage(rejected);
                tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);

                ol_tx_target_credit_incr(pdev, rejected);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);

                rejected = next;
        }
}

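/**
 * ol_tx_send_nonstd() - download a non-standard tx frame to the target
 * @pdev: physical device object
 * @tx_desc: tx descriptor for the frame
 * @msdu: frame to download
 * @pkt_type: HTT frame type of the frame being sent
 *
 * If the HTT download fails, the consumed credit is returned and the
 * descriptor and frame are freed with an error status.
 */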
void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
                  struct ol_tx_desc_t *tx_desc,
                  qdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
{
        int msdu_credit_consumed;
        uint16_t id;
        int failed;

        msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
        id = ol_tx_desc_id(pdev, tx_desc);
        QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
        failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
        if (failed) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
                           "Error: freeing tx frame after htt_tx failed");
                ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
        }
}

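/**
 * ol_tx_download_done_base() - common handling after the host download of a
 *      tx frame completes
 * @pdev: physical device object
 * @status: download status reported by HTT
 * @msdu: frame whose download completed
 * @msdu_id: ID of the tx descriptor for the frame
 *
 * Invokes any registered management-frame download callback, returns the
 * credit and frees the descriptor on download failure, and otherwise frees
 * the descriptor if the target tx completion was already processed.
 */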
static inline void
ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
                         A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
        struct ol_tx_desc_t *tx_desc;

        tx_desc = ol_tx_desc_find(pdev, msdu_id);
        qdf_assert(tx_desc);

        /*
         * If the download is done for a management frame,
         * call the download callback if registered.
         */
        if (tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
                int tx_mgmt_index = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
                ol_txrx_mgmt_tx_cb download_cb =
                        pdev->tx_mgmt.callbacks[tx_mgmt_index].download_cb;

                if (download_cb) {
                        download_cb(pdev->tx_mgmt.callbacks[tx_mgmt_index].ctxt,
                                    tx_desc->netbuf, status != A_OK);
                }
        }

        if (status != A_OK) {
                ol_tx_target_credit_incr(pdev, msdu);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
                                             1 /* download err */);
        } else {
                if (OL_TX_DESC_NO_REFS(tx_desc)) {
                        /*
                         * The decremented value was zero - free the frame.
                         * Use the tx status recorded previously during
                         * tx completion handling.
                         */
                        ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
                                                     tx_desc->status !=
                                                     htt_tx_status_ok);
                }
        }
}

void
ol_tx_download_done_ll(void *pdev,
                       A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
        ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
                                 msdu_id);
}

void
ol_tx_download_done_hl_retain(void *txrx_pdev,
                              A_STATUS status,
                              qdf_nbuf_t msdu, uint16_t msdu_id)
{
        struct ol_txrx_pdev_t *pdev = txrx_pdev;

        ol_tx_download_done_base(pdev, status, msdu, msdu_id);
}

void
ol_tx_download_done_hl_free(void *txrx_pdev,
                            A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
{
        struct ol_txrx_pdev_t *pdev = txrx_pdev;
        struct ol_tx_desc_t *tx_desc;

        tx_desc = ol_tx_desc_find(pdev, msdu_id);
        qdf_assert(tx_desc);

        ol_tx_download_done_base(pdev, status, msdu, msdu_id);

        if ((tx_desc->pkt_type != OL_TX_FRM_NO_FREE) &&
            (tx_desc->pkt_type < OL_TXRX_MGMT_TYPE_BASE)) {
                qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc, status != A_OK);
        }
}

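/**
 * ol_tx_target_credit_init() - record the initial tx credit advertised by
 *      the target
 * @pdev: physical device object
 * @credit_delta: initial number of tx credits granted by the target
 */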
void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
        qdf_atomic_add(credit_delta, &pdev->orig_target_tx_credit);
}

void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
        TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
                              qdf_atomic_read(&pdev->target_tx_credit),
                              credit_delta,
                              qdf_atomic_read(&pdev->target_tx_credit) +
                              credit_delta);
        qdf_atomic_add(credit_delta, &pdev->target_tx_credit);
}

#ifdef QCA_COMPUTE_TX_DELAY

static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
                    enum htt_tx_status status,
                    uint16_t *desc_ids, int num_msdus);

#else
static inline void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
                    enum htt_tx_status status,
                    uint16_t *desc_ids, int num_msdus)
{
}
#endif /* QCA_COMPUTE_TX_DELAY */

#ifndef OL_TX_RESTORE_HDR
#define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
#endif
/*
 * The following macros could have been inline functions too.
 * The only rationale for choosing macros is to force the compiler to inline
 * the implementation, which cannot be guaranteed for actual "inline"
 * functions, since "inline" is only a hint to the compiler.
 * In the performance path, we choose to force the inlining, in preference to
 * the type-checking offered by actual inlined functions.
 */
#define ol_tx_msdu_complete_batch(_pdev, _tx_desc, _tx_descs, _status) \
        TAILQ_INSERT_TAIL(&(_tx_descs), (_tx_desc), tx_desc_list_elem)
#ifndef ATH_11AC_TXCOMPACT
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf, \
                                   _lcl_freelist, _tx_desc_last) \
        do { \
                qdf_atomic_init(&(_tx_desc)->ref_cnt); \
                /* restore original hdr offset */ \
                OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
                qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
                qdf_nbuf_free((_netbuf)); \
                ((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
                        (_lcl_freelist); \
                if (qdf_unlikely(!(_lcl_freelist))) { \
                        (_tx_desc_last) = (union ol_tx_desc_list_elem_t *) \
                                (_tx_desc); \
                } \
                (_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
        } while (0)
#else    /* !ATH_11AC_TXCOMPACT */
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf, \
                                   _lcl_freelist, _tx_desc_last) \
        do { \
                /* restore original hdr offset */ \
                OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
                qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
                qdf_nbuf_free((_netbuf)); \
                ((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
                        (_lcl_freelist); \
                if (qdf_unlikely(!(_lcl_freelist))) { \
                        (_tx_desc_last) = (union ol_tx_desc_list_elem_t *) \
                                (_tx_desc); \
                } \
                (_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
        } while (0)

#endif /* !ATH_11AC_TXCOMPACT */

#ifdef QCA_TX_SINGLE_COMPLETIONS
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status) \
        ol_tx_msdu_complete_single((_pdev), (_tx_desc), \
                                   (_netbuf), (_lcl_freelist), \
                                   _tx_desc_last)
#else   /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status) \
        do { \
                if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
                        ol_tx_msdu_complete_single((_pdev), (_tx_desc), \
                                                   (_netbuf), (_lcl_freelist), \
                                                   (_tx_desc_last)); \
                } else { \
                        ol_tx_desc_frame_free_nonstd( \
                                (_pdev), (_tx_desc), \
                                (_status) != htt_tx_status_ok); \
                } \
        } while (0)
#endif  /* !QCA_TX_STD_PATH_ONLY */
#else   /* !QCA_TX_SINGLE_COMPLETIONS */
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status) \
        ol_tx_msdu_complete_batch((_pdev), (_tx_desc), (_tx_descs), (_status))
#else   /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs, \
                            _netbuf, _lcl_freelist, \
                            _tx_desc_last, _status) \
        do { \
                if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
                        ol_tx_msdu_complete_batch((_pdev), (_tx_desc), \
                                                  (_tx_descs), (_status)); \
                } else { \
                        ol_tx_desc_frame_free_nonstd((_pdev), (_tx_desc), \
                                                     (_status) != \
                                                     htt_tx_status_ok); \
                } \
        } while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#endif /* QCA_TX_SINGLE_COMPLETIONS */

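/**
 * ol_tx_discard_target_frms() - free tx frames the target never completed
 * @pdev: physical device object
 *
 * Walk the tx descriptor pool and free any descriptor that still holds a
 * frame, i.e. a frame that was handed to the target but for which no tx
 * completion was ever received.
 */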
void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
{
        int i = 0;
        struct ol_tx_desc_t *tx_desc;

        for (i = 0; i < pdev->tx_desc.pool_size; i++) {
                tx_desc = ol_tx_desc_find(pdev, i);
                /*
                 * Confirm that each tx descriptor is "empty", i.e. it has
                 * no tx frame attached.
                 * In particular, check that there are no frames that have
                 * been given to the target to transmit, for which the
                 * target has never provided a response.
                 */
                if (qdf_atomic_read(&tx_desc->ref_cnt)) {
                        TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
                                   "Warning: freeing tx frame (no tx completion from the target)\n");
                        ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
                }
        }
}

void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
{
        ol_tx_target_credit_update(pdev, credits);

        if (pdev->cfg.is_high_latency)
                ol_tx_sched(pdev);

        /* UNPAUSE OS Q */
        ol_tx_flow_ct_unpause_os_q(pdev);
}

/*
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored in
 * ol_tx_inspect_handler().
 */
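/**
 * ol_tx_completion_handler() - process a batch of tx completions from the
 *      target
 * @pdev: physical device object
 * @num_msdus: number of completed MSDUs in this batch
 * @status: tx status shared by all MSDUs in the batch
 * @tx_desc_id_iterator: list of completed tx descriptor IDs
 *
 * Releases the target-FW reference on each descriptor, updates statistics
 * and group credit, returns the freed descriptors to the free list, and
 * unpauses the OS queues if flow control had paused them.
 */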
void
ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
                         int num_msdus,
                         enum htt_tx_status status, void *tx_desc_id_iterator)
{
        int i;
        uint16_t *desc_ids = (uint16_t *)tx_desc_id_iterator;
        uint16_t tx_desc_id;
        struct ol_tx_desc_t *tx_desc;
        uint32_t byte_cnt = 0;
        qdf_nbuf_t netbuf;

        union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
        union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
        ol_tx_desc_list tx_descs;

        TAILQ_INIT(&tx_descs);

        ol_tx_delay_compute(pdev, status, desc_ids, num_msdus);

        for (i = 0; i < num_msdus; i++) {
                tx_desc_id = desc_ids[i];
                tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
                tx_desc->status = status;
                netbuf = tx_desc->netbuf;
                QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
                DPTRACE(qdf_dp_trace_ptr(netbuf,
                        QDF_DP_TRACE_FREE_PACKET_PTR_RECORD,
                        qdf_nbuf_data_addr(netbuf),
                        sizeof(qdf_nbuf_data(netbuf)), tx_desc->id, status));
                htc_pm_runtime_put(pdev->htt_pdev->htc_pdev);
                ol_tx_desc_update_group_credit(pdev, tx_desc_id, 1, 0, status);
                /* Per SDU update of byte count */
                byte_cnt += qdf_nbuf_len(netbuf);
                if (OL_TX_DESC_NO_REFS(tx_desc)) {
                        ol_tx_statistics(
                                pdev->ctrl_pdev,
                                HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
                                                          (tx_desc->
                                                           htt_tx_desc))),
                                status != htt_tx_status_ok);
                        ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
                                            lcl_freelist, tx_desc_last,
                                            status);
                }
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
                tx_desc->pkt_type = ol_tx_frm_freed;
#ifdef QCA_COMPUTE_TX_DELAY
                tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
#endif
        }

        /* One shot protected access to pdev freelist, when setup */
        if (lcl_freelist) {
                qdf_spin_lock(&pdev->tx_mutex);
                tx_desc_last->next = pdev->tx_desc.freelist;
                pdev->tx_desc.freelist = lcl_freelist;
                pdev->tx_desc.num_free += (uint16_t)num_msdus;
                qdf_spin_unlock(&pdev->tx_mutex);
        } else {
                ol_tx_desc_frame_list_free(pdev, &tx_descs,
                                           status != htt_tx_status_ok);
        }

        if (pdev->cfg.is_high_latency) {
                /*
                 * Credit was already explicitly updated by HTT,
                 * but update the number of available tx descriptors,
                 * then invoke the scheduler, since new credit is probably
                 * available now.
                 */
                qdf_atomic_add(num_msdus, &pdev->tx_queue.rsrc_cnt);
                ol_tx_sched(pdev);
        } else {
                ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
        }

        /* UNPAUSE OS Q */
        ol_tx_flow_ct_unpause_os_q(pdev);
        /* Do one shot statistics */
        TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
}

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

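/**
 * ol_tx_desc_update_group_credit() - return credit to the tx queue group
 *      that the completed frame's vdev belongs to
 * @pdev: physical device object
 * @tx_desc_id: ID of the completed tx descriptor
 * @credit: credit to return to the group
 * @absolute: whether @credit is an absolute value or a delta
 * @status: tx completion status
 */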
void ol_tx_desc_update_group_credit(ol_txrx_pdev_handle pdev,
                u_int16_t tx_desc_id, int credit, u_int8_t absolute,
                enum htt_tx_status status)
{
        uint8_t i, is_member;
        uint16_t vdev_id_mask;
        struct ol_tx_desc_t *tx_desc;

        tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
        for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
                vdev_id_mask =
                        OL_TXQ_GROUP_VDEV_ID_MASK_GET(
                                        pdev->txq_grps[i].membership);
                is_member = OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_id_mask,
                                tx_desc->vdev->vdev_id);
                if (is_member) {
                        ol_txrx_update_group_credit(&pdev->txq_grps[i],
                                                    credit, absolute);
                        break;
                }
        }
        ol_tx_update_group_credit_stats(pdev);
}

#ifdef DEBUG_HL_LOGGING

void ol_tx_update_group_credit_stats(ol_txrx_pdev_handle pdev)
{
        uint16_t curr_index;
        uint8_t i;

        qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
        pdev->grp_stats.last_valid_index++;
        if (pdev->grp_stats.last_valid_index > (OL_TX_GROUP_STATS_LOG_SIZE
                                - 1)) {
                pdev->grp_stats.last_valid_index -= OL_TX_GROUP_STATS_LOG_SIZE;
                pdev->grp_stats.wrap_around = 1;
        }
        curr_index = pdev->grp_stats.last_valid_index;

        for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
                pdev->grp_stats.stats[curr_index].grp[i].member_vdevs =
                        OL_TXQ_GROUP_VDEV_ID_MASK_GET(
                                        pdev->txq_grps[i].membership);
                pdev->grp_stats.stats[curr_index].grp[i].credit =
                        qdf_atomic_read(&pdev->txq_grps[i].credit);
        }

        qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
}

void ol_tx_dump_group_credit_stats(ol_txrx_pdev_handle pdev)
{
        uint16_t i, j, is_break = 0;
        int16_t curr_index, old_index, wrap_around;
        uint16_t curr_credit, old_credit, mem_vdevs;

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "Group credit stats:");
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  " No: GrpID: Credit: Change: vdev_map");

        qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
        curr_index = pdev->grp_stats.last_valid_index;
        wrap_around = pdev->grp_stats.wrap_around;
        qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);

        if (curr_index < 0) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "Not initialized");
                return;
        }

        for (i = 0; i < OL_TX_GROUP_STATS_LOG_SIZE; i++) {
                old_index = curr_index - 1;
                if (old_index < 0) {
                        if (wrap_around == 0)
                                is_break = 1;
                        else
                                old_index = OL_TX_GROUP_STATS_LOG_SIZE - 1;
                }

                for (j = 0; j < OL_TX_MAX_TXQ_GROUPS; j++) {
                        qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
                        curr_credit =
                                pdev->grp_stats.stats[curr_index].
                                                                grp[j].credit;
                        if (!is_break)
                                old_credit =
                                        pdev->grp_stats.stats[old_index].
                                                                grp[j].credit;

                        mem_vdevs =
                                pdev->grp_stats.stats[curr_index].grp[j].
                                                                member_vdevs;
                        qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);

                        if (!is_break)
                                QDF_TRACE(QDF_MODULE_ID_TXRX,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "%4d: %5d: %6d %6d %8x",
                                          curr_index, j,
                                          curr_credit,
                                          (curr_credit - old_credit),
                                          mem_vdevs);
                        else
                                QDF_TRACE(QDF_MODULE_ID_TXRX,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "%4d: %5d: %6d %6s %8x",
                                          curr_index, j,
                                          curr_credit, "NA", mem_vdevs);
                }

                if (is_break)
                        break;

                curr_index = old_index;
        }
}

void ol_tx_clear_group_credit_stats(ol_txrx_pdev_handle pdev)
{
        qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
        qdf_mem_zero(&pdev->grp_stats, sizeof(pdev->grp_stats));
        pdev->grp_stats.last_valid_index = -1;
        pdev->grp_stats.wrap_around = 0;
        qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
}
#endif
#endif

/*
 * ol_tx_single_completion_handler performs the same tx completion
 * processing as ol_tx_completion_handler, but for a single frame.
 * ol_tx_completion_handler is optimized to handle batch completions
 * as efficiently as possible; in contrast ol_tx_single_completion_handler
 * handles single frames as simply and generally as possible.
 * Thus, this ol_tx_single_completion_handler function is suitable for
 * intermittent usage, such as for tx mgmt frames.
 */
void
ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
                                enum htt_tx_status status, uint16_t tx_desc_id)
{
        struct ol_tx_desc_t *tx_desc;
        qdf_nbuf_t netbuf;

        tx_desc = ol_tx_desc_find_check(pdev, tx_desc_id);
        if (tx_desc == NULL) {
                TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
                           "%s: invalid desc_id(%u), ignore it.\n",
                           __func__,
                           tx_desc_id);
                return;
        }

        tx_desc->status = status;
        netbuf = tx_desc->netbuf;

        QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
        /* Do one shot statistics */
        TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, qdf_nbuf_len(netbuf));

        if (OL_TX_DESC_NO_REFS(tx_desc)) {
                ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
                                             status != htt_tx_status_ok);
        }

        TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
                              qdf_atomic_read(&pdev->target_tx_credit),
                              1, qdf_atomic_read(&pdev->target_tx_credit) + 1);

        if (pdev->cfg.is_high_latency) {
                /*
                 * Credit was already explicitly updated by HTT,
                 * but update the number of available tx descriptors,
                 * then invoke the scheduler, since new credit is probably
                 * available now.
                 */
                qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
                ol_tx_sched(pdev);
        } else {
                qdf_atomic_add(1, &pdev->target_tx_credit);
        }
}

/*
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler().
 * Any change in ol_tx_completion_handler() must be mirrored here.
 */
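/**
 * ol_tx_inspect_handler() - handle tx frames returned to the host for
 *      inspection rather than transmission
 * @pdev: physical device object
 * @num_msdus: number of returned MSDUs
 * @tx_desc_id_iterator: list of tx descriptor IDs for the returned frames
 *
 * Used for frames (e.g. multicast frames) that the target hands back to the
 * host instead of transmitting; the descriptors are released as if the
 * frames had completed with htt_tx_status_ok.
 */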
void
ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
                      int num_msdus, void *tx_desc_id_iterator)
{
        uint16_t vdev_id, i;
        struct ol_txrx_vdev_t *vdev;
        uint16_t *desc_ids = (uint16_t *)tx_desc_id_iterator;
        uint16_t tx_desc_id;
        struct ol_tx_desc_t *tx_desc;
        union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
        union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
        qdf_nbuf_t netbuf;
        ol_tx_desc_list tx_descs;

        TAILQ_INIT(&tx_descs);

        for (i = 0; i < num_msdus; i++) {
                tx_desc_id = desc_ids[i];
                tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
                netbuf = tx_desc->netbuf;

                /* find the "vdev" this tx_desc belongs to */
                vdev_id = HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
                                                    (tx_desc->htt_tx_desc)));
                TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                        if (vdev->vdev_id == vdev_id)
                                break;
                }

                /* vdev now points to the vdev for this descriptor. */

#ifndef ATH_11AC_TXCOMPACT
                /* save this multicast packet to local free list */
                if (qdf_atomic_dec_and_test(&tx_desc->ref_cnt))
#endif
                {
                        /*
                         * For this function only, force the htt status to be
                         * "htt_tx_status_ok" for graceful freeing of this
                         * multicast frame.
                         */
                        ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
                                            lcl_freelist, tx_desc_last,
                                            htt_tx_status_ok);

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
                        tx_desc->pkt_type = ol_tx_frm_freed;
#ifdef QCA_COMPUTE_TX_DELAY
                        tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
#endif
                }
        }

        if (lcl_freelist) {
                qdf_spin_lock(&pdev->tx_mutex);
                tx_desc_last->next = pdev->tx_desc.freelist;
                pdev->tx_desc.freelist = lcl_freelist;
                qdf_spin_unlock(&pdev->tx_mutex);
        } else {
                ol_tx_desc_frame_list_free(pdev, &tx_descs,
                                           htt_tx_status_discard);
        }
        TX_CREDIT_DEBUG_PRINT(" <HTT> Increase HTT credit %d + %d = %d..\n",
                              qdf_atomic_read(&pdev->target_tx_credit),
                              num_msdus,
                              qdf_atomic_read(&pdev->target_tx_credit) +
                              num_msdus);

        if (pdev->cfg.is_high_latency) {
                /* credit was already explicitly updated by HTT */
                ol_tx_sched(pdev);
        } else {
                ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
        }
}

#ifdef QCA_COMPUTE_TX_DELAY
/**
 * @brief Update the compute interval period for TSM stats.
 * @details
 * @param ppdev - the physical device object
 * @param interval - interval (in ms) for stats computation
 */
void ol_tx_set_compute_interval(void *ppdev, uint32_t interval)
{
        ol_txrx_pdev_handle pdev = ppdev;

        pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(interval);
}

/**
 * @brief Return the uplink (transmitted) packet count and loss count.
 * @details
 * This function is called to get the uplink packet count and loss count
 * for a given stream (access category) at a regular interval.
 * It also resets the counters; hence, the value returned covers the packets
 * counted in the last 5 second (default) interval. These counters are
 * incremented per access category in ol_tx_completion_handler().
 *
 * @param ppdev - the physical device object
 * @param out_packet_count - number of packets transmitted
 * @param out_packet_loss_count - number of packets lost
 * @param category - access category of interest
 */
void
ol_tx_packet_count(void *ppdev,
                   uint16_t *out_packet_count,
                   uint16_t *out_packet_loss_count, int category)
{
        ol_txrx_pdev_handle pdev = ppdev;

        *out_packet_count = pdev->packet_count[category];
        *out_packet_loss_count = pdev->packet_loss_count[category];
        pdev->packet_count[category] = 0;
        pdev->packet_loss_count[category] = 0;
}

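/**
 * ol_tx_delay_avg() - average a 64-bit sum of ticks over a 32-bit count
 * @sum: sum of the delay samples, in ticks
 * @num: number of samples
 *
 * Shift both the sum and the count down until the sum fits in 32 bits,
 * then do a 32-bit divide, rounding to the nearest integer. This avoids a
 * 64-bit division at the cost of a small loss of precision.
 *
 * Return: rounded average delay in ticks
 */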
uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
{
        uint32_t sum32;
        int shift = 0;

        /*
         * To avoid doing a 64-bit divide, shift the sum down until it is
         * no more than 32 bits (and shift the denominator to match).
         */
        while ((sum >> 32) != 0) {
                sum >>= 1;
                shift++;
        }
        sum32 = (uint32_t)sum;
        num >>= shift;
        return (sum32 + (num >> 1)) / num;      /* round to nearest */
}

void
ol_tx_delay(void *ppdev,
            uint32_t *queue_delay_microsec,
            uint32_t *tx_delay_microsec, int category)
{
        ol_txrx_pdev_handle pdev = ppdev;
        int index;
        uint32_t avg_delay_ticks;
        struct ol_tx_delay_data *data;

        qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

        qdf_spin_lock_bh(&pdev->tx_delay.mutex);
        index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

        data = &pdev->tx_delay.cats[category].copies[index];

        if (data->avgs.transmit_num > 0) {
                avg_delay_ticks =
                        ol_tx_delay_avg(data->avgs.transmit_sum_ticks,
                                        data->avgs.transmit_num);
                *tx_delay_microsec =
                        qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
        } else {
                /*
                 * This case should only happen if there's a query
                 * within 5 sec after the first tx data frame.
                 */
                *tx_delay_microsec = 0;
        }
        if (data->avgs.queue_num > 0) {
                avg_delay_ticks =
                        ol_tx_delay_avg(data->avgs.queue_sum_ticks,
                                        data->avgs.queue_num);
                *queue_delay_microsec =
                        qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
        } else {
                /*
                 * This case should only happen if there's a query
                 * within 5 sec after the first tx data frame.
                 */
                *queue_delay_microsec = 0;
        }

        qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

void
ol_tx_delay_hist(void *ppdev,
                 uint16_t *report_bin_values, int category)
{
        ol_txrx_pdev_handle pdev = ppdev;
        int index, i, j;
        struct ol_tx_delay_data *data;

        qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

        qdf_spin_lock_bh(&pdev->tx_delay.mutex);
        index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

        data = &pdev->tx_delay.cats[category].copies[index];

        for (i = 0, j = 0; i < QCA_TX_DELAY_HIST_REPORT_BINS - 1; i++) {
                uint16_t internal_bin_sum = 0;

                while (j < (1 << i))
                        internal_bin_sum += data->hist_bins_queue[j++];

                report_bin_values[i] = internal_bin_sum;
        }
        report_bin_values[i] = data->hist_bins_queue[j];        /* overflow */

        qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

#ifdef QCA_COMPUTE_TX_DELAY_PER_TID

static uint8_t
ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
                            qdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
{
        uint16_t ethertype;
        uint8_t *dest_addr, *l3_hdr;
        int is_mgmt, is_mcast;
        int l2_hdr_size;

        dest_addr = ol_tx_dest_addr_find(pdev, msdu);
        if (NULL == dest_addr)
                return QDF_NBUF_TX_EXT_TID_INVALID;

        is_mcast = IEEE80211_IS_MULTICAST(dest_addr);
        is_mgmt = tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE;
        if (is_mgmt) {
                return (is_mcast) ?
                       OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT :
                       HTT_TX_EXT_TID_MGMT;
        }
        if (is_mcast)
                return OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST;

        if (pdev->frame_format == wlan_frm_fmt_802_3) {
                struct ethernet_hdr_t *enet_hdr;

                enet_hdr = (struct ethernet_hdr_t *)qdf_nbuf_data(msdu);
                l2_hdr_size = sizeof(struct ethernet_hdr_t);
                ethertype =
                        (enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1];
                if (!IS_ETHERTYPE(ethertype)) {
                        struct llc_snap_hdr_t *llc_hdr;

                        llc_hdr = (struct llc_snap_hdr_t *)
                                  (qdf_nbuf_data(msdu) + l2_hdr_size);
                        l2_hdr_size += sizeof(struct llc_snap_hdr_t);
                        ethertype =
                                (llc_hdr->ethertype[0] << 8) | llc_hdr->
                                                               ethertype[1];
                }
        } else {
                struct llc_snap_hdr_t *llc_hdr;

                l2_hdr_size = sizeof(struct ieee80211_frame);
                llc_hdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(msdu)
                                                    + l2_hdr_size);
                l2_hdr_size += sizeof(struct llc_snap_hdr_t);
                ethertype =
                        (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
        }
        l3_hdr = qdf_nbuf_data(msdu) + l2_hdr_size;
        if (ETHERTYPE_IPV4 == ethertype) {
                return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7;
        } else if (ETHERTYPE_IPV6 == ethertype) {
                return (ipv6_traffic_class((struct ipv6_hdr_t *)l3_hdr) >> 5) &
                       0x7;
        } else {
                return QDF_NBUF_TX_EXT_TID_INVALID;
        }
}
#endif

static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
{
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
        struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
        uint8_t tid;
        qdf_nbuf_t msdu = tx_desc->netbuf;

        tid = qdf_nbuf_get_tid(msdu);
        if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
                tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc);
                if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
                        /*
                         * TID could not be determined
                         * (this is not an IP frame?)
                         */
                        return -EINVAL;
                }
        }
        return tid;
#else
        return 0;
#endif
}

static inline int
ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks)
{
        int bin;

        /*
         * For speed, multiply and shift to approximate a divide. This causes
         * a small error, but the approximation error should be much less
         * than the other uncertainties in the tx delay computation.
         */
        bin = (delay_ticks * pdev->tx_delay.hist_internal_bin_width_mult) >>
              pdev->tx_delay.hist_internal_bin_width_shift;
        if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
                bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;

        return bin;
}

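/**
 * ol_tx_delay_compute() - accumulate tx delay statistics for a batch of
 *      completed frames
 * @pdev: physical device object
 * @status: tx completion status for the batch
 * @desc_ids: IDs of the completed tx descriptors
 * @num_msdus: number of completed MSDUs
 *
 * Updates the per-category packet and loss counters, the transmit and queue
 * delay averages, and the queue-delay histogram, and switches to a fresh
 * ping-pong copy when the averaging period expires.
 */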
static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
                    enum htt_tx_status status,
                    uint16_t *desc_ids, int num_msdus)
{
        int i, index, cat;
        uint32_t now_ticks = qdf_system_ticks();
        uint32_t tx_delay_transmit_ticks, tx_delay_queue_ticks;
        uint32_t avg_time_ticks;
        struct ol_tx_delay_data *data;

        qdf_assert(num_msdus > 0);

        /*
         * Keep static counters for total and lost packets;
         * they are reset in ol_tx_delay(), the function used to fetch them.
         */

        cat = ol_tx_delay_category(pdev, desc_ids[0]);
        if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
                return;

        pdev->packet_count[cat] = pdev->packet_count[cat] + num_msdus;
        if (status != htt_tx_status_ok) {
                for (i = 0; i < num_msdus; i++) {
                        cat = ol_tx_delay_category(pdev, desc_ids[i]);
                        if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
                                return;
                        pdev->packet_loss_count[cat]++;
                }
                return;
        }

        /* since we may switch the ping-pong index, provide mutex w. readers */
        qdf_spin_lock_bh(&pdev->tx_delay.mutex);
        index = pdev->tx_delay.cats[cat].in_progress_idx;

        data = &pdev->tx_delay.cats[cat].copies[index];

        if (pdev->tx_delay.tx_compl_timestamp_ticks != 0) {
                tx_delay_transmit_ticks =
                        now_ticks - pdev->tx_delay.tx_compl_timestamp_ticks;
                /*
                 * We'd like to account for the number of MSDUs that were
                 * transmitted together, but we don't know this. All we know
                 * is the number of MSDUs that were acked together.
                 * Since the frame error rate is small, this is nearly the same
                 * as the number of frames transmitted together.
                 */
                data->avgs.transmit_sum_ticks += tx_delay_transmit_ticks;
                data->avgs.transmit_num += num_msdus;
        }
        pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks;

        for (i = 0; i < num_msdus; i++) {
                int bin;
                uint16_t id = desc_ids[i];
                struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id);

                tx_delay_queue_ticks =
                        now_ticks - tx_desc->entry_timestamp_ticks;

                data->avgs.queue_sum_ticks += tx_delay_queue_ticks;
                data->avgs.queue_num++;
                bin = ol_tx_delay_hist_bin(pdev, tx_delay_queue_ticks);
                data->hist_bins_queue[bin]++;
        }

        /* check if it's time to start a new average */
        avg_time_ticks =
                now_ticks - pdev->tx_delay.cats[cat].avg_start_time_ticks;
        if (avg_time_ticks > pdev->tx_delay.avg_period_ticks) {
                pdev->tx_delay.cats[cat].avg_start_time_ticks = now_ticks;
                index = 1 - index;
                pdev->tx_delay.cats[cat].in_progress_idx = index;
                qdf_mem_zero(&pdev->tx_delay.cats[cat].copies[index],
                             sizeof(pdev->tx_delay.cats[cat].copies[index]));
        }

        qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}

#endif /* QCA_COMPUTE_TX_DELAY */