/*
 * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _OL_TXRX_INTERNAL__H_
#define _OL_TXRX_INTERNAL__H_

#include <qdf_util.h>             /* qdf_assert */
#include <qdf_nbuf.h>             /* qdf_nbuf_t */
#include <qdf_mem.h>              /* qdf_mem_set */
#include <cds_ieee80211_common.h> /* ieee80211_frame */
#include <ol_htt_rx_api.h>        /* htt_rx_msdu_desc_completes_mpdu, etc. */

#include <ol_txrx_types.h>

#include <ol_txrx_dbg.h>
#include <enet.h>                 /* ETHERNET_HDR_LEN, etc. */
#include <ipv4.h>                 /* IPV4_HDR_LEN, etc. */
#include <ip_prot.h>              /* IP_PROTOCOL_TCP, etc. */

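/*
 * Tx descriptor reference counting: with ATH_11AC_TXCOMPACT the ref-count
 * helpers below are no-ops and OL_TX_DESC_NO_REFS() is unconditionally
 * true (the descriptor is assumed to hold a single reference); otherwise
 * they wrap qdf_atomic operations on tx_desc->ref_cnt, and
 * OL_TX_DESC_NO_REFS() is true only when the decrement reaches zero.
 */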
#ifdef ATH_11AC_TXCOMPACT
#define OL_TX_DESC_NO_REFS(tx_desc) 1
#define OL_TX_DESC_REF_INIT(tx_desc) /* no-op */
#define OL_TX_DESC_REF_INC(tx_desc) /* no-op */
#else
#define OL_TX_DESC_NO_REFS(tx_desc) \
	qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
#define OL_TX_DESC_REF_INIT(tx_desc) qdf_atomic_init(&tx_desc->ref_cnt)
#define OL_TX_DESC_REF_INC(tx_desc) qdf_atomic_inc(&tx_desc->ref_cnt)
#endif

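/*
 * TXRX_ASSERT_LEVEL selects how aggressively the TXRX layer asserts:
 * level 0 disables both macros, level 1 enables only TXRX_ASSERT1, and
 * levels above 1 also enable TXRX_ASSERT2.  Under Klocwork analysis both
 * macros abort() so the checker treats the asserted conditions as fatal.
 */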
#ifndef TXRX_ASSERT_LEVEL
#define TXRX_ASSERT_LEVEL 3
#endif

#ifdef __KLOCWORK__
#define TXRX_ASSERT1(x) do { if (!(x)) abort(); } while (0)
#define TXRX_ASSERT2(x) do { if (!(x)) abort(); } while (0)
#else /* #ifdef __KLOCWORK__ */

#if TXRX_ASSERT_LEVEL > 0
#define TXRX_ASSERT1(condition) qdf_assert((condition))
#else
#define TXRX_ASSERT1(condition)
#endif

#if TXRX_ASSERT_LEVEL > 1
#define TXRX_ASSERT2(condition) qdf_assert((condition))
#else
#define TXRX_ASSERT2(condition)
#endif
#endif /* #ifdef __KLOCWORK__ */

#ifdef TXRX_PRINT_ENABLE

#include <stdarg.h>             /* va_list */
#include <qdf_types.h>          /* qdf_vprint */

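/*
 * Module logging helpers: ol_txrx_alert/err/warn/info/info_high/dbg map to
 * the corresponding QDF_TRACE_* severity for QDF_MODULE_ID_TXRX (note that
 * info and info_high currently share the INFO level).  The txrx_nofl_*
 * variants use the *_NO_FL forms, which are intended to omit the
 * file-and-line prefix from the log output.
 */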
#define ol_txrx_alert(params...) \
	QDF_TRACE_FATAL(QDF_MODULE_ID_TXRX, params)
#define ol_txrx_err(params...) \
	QDF_TRACE_ERROR(QDF_MODULE_ID_TXRX, params)
#define ol_txrx_warn(params...) \
	QDF_TRACE_WARN(QDF_MODULE_ID_TXRX, params)
#define ol_txrx_info(params...) \
	QDF_TRACE_INFO(QDF_MODULE_ID_TXRX, params)
#define ol_txrx_info_high(params...) \
	QDF_TRACE_INFO(QDF_MODULE_ID_TXRX, params)
#define ol_txrx_dbg(params...) \
	QDF_TRACE_DEBUG(QDF_MODULE_ID_TXRX, params)

#define txrx_nofl_alert(params...) \
	QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_TXRX, params)
#define txrx_nofl_err(params...) \
	QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_TXRX, params)
#define txrx_nofl_warn(params...) \
	QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_TXRX, params)
#define txrx_nofl_info(params...) \
	QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_TXRX, params)
#define txrx_nofl_dbg(params...) \
	QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_TXRX, params)

/*
 * Rate-limit PN check failure message printing to at most
 * one message per second.
 */
#define TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS 1000

#else

#define ol_txrx_alert(format, args...)
#define ol_txrx_err(format, args...)
#define ol_txrx_warn(format, args...)
#define ol_txrx_info(format, args...)
#define ol_txrx_info_high(format, args...)
#define ol_txrx_dbg(format, args...)

#define txrx_nofl_alert(params...)
#define txrx_nofl_err(params...)
#define txrx_nofl_warn(params...)
#define txrx_nofl_info(params...)
#define txrx_nofl_dbg(params...)

#endif /* TXRX_PRINT_ENABLE */

/*--- tx credit debug printouts ---*/

#ifndef DEBUG_CREDIT
#define DEBUG_CREDIT 0
#endif

#if DEBUG_CREDIT
#define TX_CREDIT_DEBUG_PRINT(fmt, ...) qdf_print(fmt, ## __VA_ARGS__)
#else
#define TX_CREDIT_DEBUG_PRINT(fmt, ...)
#endif

/*--- tx scheduler debug printouts ---*/

#ifdef HOST_TX_SCHED_DEBUG
#define TX_SCHED_DEBUG_PRINT(fmt, ...) qdf_print(fmt, ## __VA_ARGS__)
#else
#define TX_SCHED_DEBUG_PRINT(fmt, ...)
#endif
#define TX_SCHED_DEBUG_PRINT_ALWAYS(fmt, ...) qdf_print(fmt, ## __VA_ARGS__)

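/*
 * OL_TXRX_LIST_APPEND - append a netbuf to a singly linked nbuf list that
 * is tracked by head/tail pointers.  The caller is expected to start with
 * head (and, conceptually, tail) set to NULL, e.g. with illustrative
 * local names:
 *
 *	qdf_nbuf_t deliver_list_head = NULL;
 *	qdf_nbuf_t deliver_list_tail = NULL;
 *	OL_TXRX_LIST_APPEND(deliver_list_head, deliver_list_tail, msdu);
 *
 * The macro only links the new element via qdf_nbuf_set_next() when a head
 * already exists; it does not terminate the list, so callers typically set
 * the tail's next pointer to NULL when the list is handed off.
 */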
#define OL_TXRX_LIST_APPEND(head, tail, elem) \
	do { \
		if (!(head)) { \
			(head) = (elem); \
		} else { \
			qdf_nbuf_set_next((tail), (elem)); \
		} \
		(tail) = (elem); \
	} while (0)

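/**
 * ol_rx_mpdu_list_next() - find the end of the first MPDU in an MSDU list
 * @pdev: handle to the physical device
 * @mpdu_list: first MSDU of the MPDU of interest
 * @mpdu_tail: filled with the last MSDU belonging to that MPDU
 * @next_mpdu: filled with the first MSDU of the following MPDU, or NULL
 *
 * Walks the flat MSDU list using the HTT rx descriptors until a descriptor
 * is marked as completing the MPDU.
 */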
static inline void
ol_rx_mpdu_list_next(struct ol_txrx_pdev_t *pdev,
		     void *mpdu_list,
		     qdf_nbuf_t *mpdu_tail, qdf_nbuf_t *next_mpdu)
{
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	qdf_nbuf_t msdu;

	/*
	 * For now, we use a simple flat list of MSDUs.
	 * So, traverse the list until we reach the last MSDU within the MPDU.
	 */
	TXRX_ASSERT2(mpdu_list);
	msdu = mpdu_list;
	while (!htt_rx_msdu_desc_completes_mpdu
		       (htt_pdev, htt_rx_msdu_desc_retrieve(htt_pdev, msdu))) {
		msdu = qdf_nbuf_next(msdu);
		TXRX_ASSERT2(msdu);
	}
	/* msdu now points to the last MSDU within the first MPDU */
	*mpdu_tail = msdu;
	*next_mpdu = qdf_nbuf_next(msdu);
}

/*--- txrx stats macros ---*/

/* unconditional defs */
#define TXRX_STATS_INCR(pdev, field) TXRX_STATS_ADD(pdev, field, 1)

/* default conditional defs (may be undefed below) */

#define TXRX_STATS_INIT(_pdev) \
	qdf_mem_zero(&((_pdev)->stats), sizeof((_pdev)->stats))
#define TXRX_STATS_ADD(_pdev, _field, _delta) { \
		_pdev->stats._field += _delta; }
#define TXRX_STATS_MSDU_INCR(pdev, field, netbuf) \
	do { \
		TXRX_STATS_INCR((pdev), pub.field.pkts); \
		TXRX_STATS_ADD((pdev), pub.field.bytes, qdf_nbuf_len(netbuf)); \
	} while (0)

/* conditional defs based on verbosity level */


#define TXRX_STATS_MSDU_LIST_INCR(pdev, field, netbuf_list) \
	do { \
		qdf_nbuf_t tmp_list = netbuf_list; \
		while (tmp_list) { \
			TXRX_STATS_MSDU_INCR(pdev, field, tmp_list); \
			tmp_list = qdf_nbuf_next(tmp_list); \
		} \
	} while (0)

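/*
 * Per-MSDU bookkeeping for tx completions: TXRX_STATS_MSDU_INCR_TX_STATUS
 * folds a single completed netbuf into the delivered/dropped counters that
 * correspond to the HTT completion status (target discard, no-ack,
 * download failure); any other status is deliberately ignored.
 */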
#define TXRX_STATS_MSDU_INCR_TX_STATUS(status, pdev, netbuf) do { \
		if (status == htt_tx_status_ok) \
			TXRX_STATS_MSDU_INCR(pdev, tx.delivered, netbuf); \
		else if (status == htt_tx_status_discard) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.target_discard, \
					     netbuf); \
		else if (status == htt_tx_status_no_ack) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.no_ack, netbuf); \
		else if (status == htt_tx_status_download_fail) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.download_fail, \
					     netbuf); \
		else \
			/* NO-OP */; \
	} while (0)

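/*
 * Completion-size histogram: _p_cntrs is the number of packets being
 * accounted in one update; bucket upper bounds are inclusive
 * (1, 2-10, 11-20, ..., 61+).
 */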
#define TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs) \
	do { \
		if (_p_cntrs == 1) { \
			TXRX_STATS_ADD(_pdev, pub.tx.comp_histogram.pkts_1, 1);\
		} else if (_p_cntrs > 1 && _p_cntrs <= 10) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_2_10, 1); \
		} else if (_p_cntrs > 10 && _p_cntrs <= 20) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_11_20, 1); \
		} else if (_p_cntrs > 20 && _p_cntrs <= 30) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_21_30, 1); \
		} else if (_p_cntrs > 30 && _p_cntrs <= 40) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_31_40, 1); \
		} else if (_p_cntrs > 40 && _p_cntrs <= 50) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_41_50, 1); \
		} else if (_p_cntrs > 50 && _p_cntrs <= 60) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_51_60, 1); \
		} else { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_61_plus, 1); \
		} \
	} while (0)

#define TXRX_STATS_UPDATE_TX_STATS(_pdev, _status, _p_cntrs, _b_cntrs) \
	do { \
		switch (_status) { \
		case htt_tx_status_ok: \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.delivered.pkts, _p_cntrs); \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.delivered.bytes, _b_cntrs); \
			break; \
		case htt_tx_status_discard: \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.dropped.target_discard.pkts, _p_cntrs);\
			TXRX_STATS_ADD(_pdev, \
				pub.tx.dropped.target_discard.bytes, _b_cntrs);\
			break; \
		case htt_tx_status_no_ack: \
			TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.pkts, \
				 _p_cntrs); \
			TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.bytes, \
				 _b_cntrs); \
			break; \
		case htt_tx_status_download_fail: \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.dropped.download_fail.pkts, _p_cntrs); \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.dropped.download_fail.bytes, _b_cntrs);\
			break; \
		default: \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.dropped.others.pkts, _p_cntrs); \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.dropped.others.bytes, _b_cntrs); \
			break; \
		} \
		TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs); \
	} while (0)


/*--- txrx sequence number trace macros ---*/

#define TXRX_SEQ_NUM_ERR(_status) (0xffff - _status)

#if defined(ENABLE_RX_REORDER_TRACE)

A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev);
void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev);
void ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
			     uint8_t tid,
			     uint16_t reorder_idx,
			     uint16_t seq_num, int num_mpdus);

#define OL_RX_REORDER_TRACE_ATTACH ol_rx_reorder_trace_attach
#define OL_RX_REORDER_TRACE_DETACH ol_rx_reorder_trace_detach
#define OL_RX_REORDER_TRACE_ADD ol_rx_reorder_trace_add

#else

#define OL_RX_REORDER_TRACE_ATTACH(_pdev) A_OK
#define OL_RX_REORDER_TRACE_DETACH(_pdev)
#define OL_RX_REORDER_TRACE_ADD(pdev, tid, reorder_idx, seq_num, num_mpdus)

#endif /* ENABLE_RX_REORDER_TRACE */

/*--- txrx packet number trace macros ---*/

#if defined(ENABLE_RX_PN_TRACE)

A_STATUS ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev);
void ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev);
void ol_rx_pn_trace_add(struct ol_txrx_pdev_t *pdev,
			struct ol_txrx_peer_t *peer,
			uint16_t tid, void *rx_desc);

#define OL_RX_PN_TRACE_ATTACH ol_rx_pn_trace_attach
#define OL_RX_PN_TRACE_DETACH ol_rx_pn_trace_detach
#define OL_RX_PN_TRACE_ADD ol_rx_pn_trace_add

#else

#define OL_RX_PN_TRACE_ATTACH(_pdev) A_OK
#define OL_RX_PN_TRACE_DETACH(_pdev)
#define OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc)

#endif /* ENABLE_RX_PN_TRACE */

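/**
 * ol_txrx_ieee80211_hdrsize() - compute the size of an 802.11 MAC header
 * @data: start of the 802.11 frame
 *
 * Accounts for the fourth address in 4-address (DS-to-DS) frames, the QoS
 * control field, and the HT control field when the Order bit is set in a
 * QoS frame.  Control frames are not handled (see the assertion below).
 */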
static inline int ol_txrx_ieee80211_hdrsize(const void *data)
{
	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
	int size = sizeof(struct ieee80211_frame);

	/* NB: we don't handle control frames */
	TXRX_ASSERT1((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
		     IEEE80211_FC0_TYPE_CTL);
	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
	    IEEE80211_FC1_DIR_DSTODS)
		size += QDF_MAC_ADDR_SIZE;
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		size += sizeof(uint16_t);
		/* QoS frame with the Order bit set indicates an HTC frame */
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			size += sizeof(struct ieee80211_htc);
	}
	return size;
}

/*--- frame display utility ---*/

enum ol_txrx_frm_dump_options {
	ol_txrx_frm_dump_contents = 0x1,
	ol_txrx_frm_dump_tcp_seq = 0x2,
};

#ifdef TXRX_DEBUG_DATA
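/**
 * ol_txrx_frms_dump() - debug-print a list of frames
 * @name: label printed before the dump (may be NULL)
 * @pdev: physical device, used to determine the frame format
 * @frm: head of the netbuf list to dump
 * @display_options: bitmask of ol_txrx_frm_dump_contents and/or
 *	ol_txrx_frm_dump_tcp_seq
 * @max_len: maximum number of payload bytes to hex-dump per frame
 *	(further capped at TXRX_FRM_DUMP_MAX_LEN)
 */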
static inline void
ol_txrx_frms_dump(const char *name,
		  struct ol_txrx_pdev_t *pdev,
		  qdf_nbuf_t frm,
		  enum ol_txrx_frm_dump_options display_options, int max_len)
{
#define TXRX_FRM_DUMP_MAX_LEN 128
	uint8_t local_buf[TXRX_FRM_DUMP_MAX_LEN] = { 0 };
	uint8_t *p;

	if (name) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, "%s\n",
			  name);
	}
	while (frm) {
		p = qdf_nbuf_data(frm);
		if (display_options & ol_txrx_frm_dump_tcp_seq) {
			int tcp_offset;
			int l2_hdr_size;
			uint16_t ethtype;
			uint8_t ip_prot;

			if (pdev->frame_format == wlan_frm_fmt_802_3) {
				struct ethernet_hdr_t *enet_hdr =
					(struct ethernet_hdr_t *)p;
				l2_hdr_size = ETHERNET_HDR_LEN;

				/*
				 * LLC/SNAP present?
				 */
				ethtype = (enet_hdr->ethertype[0] << 8) |
					enet_hdr->ethertype[1];
				if (!IS_ETHERTYPE(ethtype)) {
					/* 802.3 format */
					struct llc_snap_hdr_t *llc_hdr;

					llc_hdr = (struct llc_snap_hdr_t *)
						(p + l2_hdr_size);
					l2_hdr_size += LLC_SNAP_HDR_LEN;
					ethtype = (llc_hdr->ethertype[0] << 8) |
						llc_hdr->ethertype[1];
				}
			} else {
				struct llc_snap_hdr_t *llc_hdr;

				/* (generic?) 802.11 */
				l2_hdr_size = sizeof(struct ieee80211_frame);
				llc_hdr = (struct llc_snap_hdr_t *)
					(p + l2_hdr_size);
				l2_hdr_size += LLC_SNAP_HDR_LEN;
				ethtype = (llc_hdr->ethertype[0] << 8) |
					llc_hdr->ethertype[1];
			}
			if (ethtype == ETHERTYPE_IPV4) {
				struct ipv4_hdr_t *ipv4_hdr;

				ipv4_hdr =
					(struct ipv4_hdr_t *)(p + l2_hdr_size);
				ip_prot = ipv4_hdr->protocol;
				tcp_offset = l2_hdr_size + IPV4_HDR_LEN;
			} else if (ethtype == ETHERTYPE_IPV6) {
				struct ipv6_hdr_t *ipv6_hdr;

				ipv6_hdr =
					(struct ipv6_hdr_t *)(p + l2_hdr_size);
				ip_prot = ipv6_hdr->next_hdr;
				tcp_offset = l2_hdr_size + IPV6_HDR_LEN;
			} else {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO,
					  "frame %pK non-IP ethertype (%x)\n",
					  frm, ethtype);
				goto NOT_IP_TCP;
			}
			if (ip_prot == IP_PROTOCOL_TCP) {
#if NEVERDEFINED
				struct tcp_hdr_t *tcp_hdr;
				uint32_t tcp_seq_num;

				tcp_hdr = (struct tcp_hdr_t *)(p + tcp_offset);
				tcp_seq_num =
					(tcp_hdr->seq_num[0] << 24) |
					(tcp_hdr->seq_num[1] << 16) |
					(tcp_hdr->seq_num[2] << 8) |
					(tcp_hdr->seq_num[3] << 0);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO,
					  "frame %pK: TCP seq num = %d\n", frm,
					  tcp_seq_num);
#else
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO,
					  "frame %pK: TCP seq num = %d\n", frm,
					  ((*(p + tcp_offset + 4)) << 24) |
					  ((*(p + tcp_offset + 5)) << 16) |
					  ((*(p + tcp_offset + 6)) << 8) |
					  (*(p + tcp_offset + 7)));
#endif
			} else {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO,
					  "frame %pK non-TCP IP protocol (%x)\n",
					  frm, ip_prot);
			}
		}
NOT_IP_TCP:
		if (display_options & ol_txrx_frm_dump_contents) {
			int i, frag_num, len_lim;

			len_lim = max_len;
			if (len_lim > qdf_nbuf_len(frm))
				len_lim = qdf_nbuf_len(frm);
			if (len_lim > TXRX_FRM_DUMP_MAX_LEN)
				len_lim = TXRX_FRM_DUMP_MAX_LEN;

			/*
			 * Gather frame contents from netbuf fragments
			 * into a contiguous buffer.
			 */
			frag_num = 0;
			i = 0;
			while (i < len_lim) {
				int frag_bytes;

				frag_bytes =
					qdf_nbuf_get_frag_len(frm, frag_num);
				if (frag_bytes > len_lim - i)
					frag_bytes = len_lim - i;
				if (frag_bytes > 0) {
					p = qdf_nbuf_get_frag_vaddr(frm,
								    frag_num);
					qdf_mem_copy(&local_buf[i], p,
						     frag_bytes);
				}
				frag_num++;
				i += frag_bytes;
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "frame %pK data (%pK), hex dump of bytes 0-%d of %d:\n",
				  frm, p, len_lim - 1, (int)qdf_nbuf_len(frm));
			p = local_buf;
			while (len_lim > 16) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO,
					  " " /* indent */
					  "%02x %02x %02x %02x %02x %02x %02x %02x "
					  "%02x %02x %02x %02x %02x %02x %02x %02x\n",
					  *(p + 0), *(p + 1), *(p + 2),
					  *(p + 3), *(p + 4), *(p + 5),
					  *(p + 6), *(p + 7), *(p + 8),
					  *(p + 9), *(p + 10), *(p + 11),
					  *(p + 12), *(p + 13), *(p + 14),
					  *(p + 15));
				p += 16;
				len_lim -= 16;
			}
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  " " /* indent */);
			while (len_lim > 0) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO, "%02x ", *p);
				p++;
				len_lim--;
			}
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "\n");
		}
		frm = qdf_nbuf_next(frm);
	}
}
#else
#define ol_txrx_frms_dump(name, pdev, frms, display_options, max_len)
#endif /* TXRX_DEBUG_DATA */

#ifdef SUPPORT_HOST_STATISTICS

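/*
 * Rx error accounting: OL_RX_ERR_STATISTICS_2 translates the HTT rx status
 * into an ol_rx_err_type and, when both vdev and peer are known, feeds it
 * into the per-peer error statistics via OL_RX_ERR_STATISTICS_1; otherwise
 * it falls back to OL_RX_ERR_INV_PEER_STATISTICS for unknown-peer frames.
 */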
#define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast) \
	ol_rx_err_statistics(pdev->ctrl_pdev, vdev->vdev_id, err_type, \
			     sec_type, is_mcast)

#define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type) \
	do { \
		int is_mcast; \
		enum htt_sec_type sec_type; \
		is_mcast = htt_rx_msdu_is_wlan_mcast( \
			pdev->htt_pdev, rx_desc); \
		sec_type = peer->security[is_mcast \
					  ? txrx_sec_mcast \
					  : txrx_sec_ucast].sec_type; \
		OL_RX_ERR_STATISTICS(pdev, vdev, err_type, \
				     pdev->sec_types[sec_type], \
				     is_mcast); \
	} while (false)

#ifdef CONFIG_HL_SUPPORT

	/**
	 * ol_rx_err_inv_get_wifi_header() - retrieve wifi header
	 * @pdev: handle to the physical device
	 * @rx_msdu: msdu of which header needs to be retrieved
	 *
	 * Return: wifi header
	 */
	static inline
	struct ieee80211_frame *ol_rx_err_inv_get_wifi_header(
		struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu)
	{
		return NULL;
	}
#else

	static inline
	struct ieee80211_frame *ol_rx_err_inv_get_wifi_header(
		struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu)
	{
		struct ieee80211_frame *wh = NULL;

		if (ol_cfg_frame_type(pdev) == wlan_frm_fmt_native_wifi)
			/* For Windows, it is always a native WiFi header. */
			wh = (struct ieee80211_frame *)qdf_nbuf_data(rx_msdu);

		return wh;
	}
#endif

#define OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu) \
	do { \
		struct ieee80211_frame *wh = NULL; \
		/* FIX THIS: */ \
		/* Here htt_rx_mpdu_wifi_hdr_retrieve should be used. */ \
		/* But at present it seems it does not work. */ \
		/* wh = (struct ieee80211_frame *) */ \
		/* htt_rx_mpdu_wifi_hdr_retrieve(pdev->htt_pdev, rx_desc); */ \
		/* This only applies to LL devices. */ \
		wh = ol_rx_err_inv_get_wifi_header(pdev->ctrl_pdev, rx_msdu); \
		ol_rx_err_inv_peer_statistics(pdev->ctrl_pdev, \
					      wh, OL_RX_ERR_UNKNOWN_PEER); \
	} while (false)

#define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status) \
	do { \
		enum ol_rx_err_type err_type = OL_RX_ERR_NONE; \
		if (rx_status == htt_rx_status_decrypt_err) \
			err_type = OL_RX_ERR_DECRYPT; \
		else if (rx_status == htt_rx_status_tkip_mic_err) \
			err_type = OL_RX_ERR_TKIP_MIC; \
		else if (rx_status == htt_rx_status_mpdu_length_err) \
			err_type = OL_RX_ERR_MPDU_LENGTH; \
		else if (rx_status == htt_rx_status_mpdu_encrypt_required_err) \
			err_type = OL_RX_ERR_ENCRYPT_REQUIRED; \
		else if (rx_status == htt_rx_status_err_dup) \
			err_type = OL_RX_ERR_DUP; \
		else if (rx_status == htt_rx_status_err_fcs) \
			err_type = OL_RX_ERR_FCS; \
		else \
			err_type = OL_RX_ERR_UNKNOWN; \
		\
		if (vdev && peer) { \
			OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, \
					       rx_desc, err_type); \
		} else { \
			OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu); \
		} \
	} while (false)
#else
#define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast)
#define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type)
#define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status)
#endif /* SUPPORT_HOST_STATISTICS */

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
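/*
 * Per-peer statistics: OL_TXRX_PEER_STATS_UPDATE classifies each MSDU as
 * broadcast, multicast, or unicast from its destination address (taken from
 * the 802.3 header, or from addr1/addr3 of the 802.11 header depending on
 * the vdev opmode) and updates the matching frame/byte counters under
 * pdev->peer_stat_mutex.
 */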
#define OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, type, msdu) \
	do { \
		qdf_spin_lock_bh(&peer->vdev->pdev->peer_stat_mutex); \
		peer->stats.tx_or_rx.frms.type += 1; \
		peer->stats.tx_or_rx.bytes.type += qdf_nbuf_len(msdu); \
		qdf_spin_unlock_bh(&peer->vdev->pdev->peer_stat_mutex); \
	} while (0)
#define OL_TXRX_PEER_STATS_UPDATE(peer, tx_or_rx, msdu) \
	do { \
		struct ol_txrx_vdev_t *vdev = peer->vdev; \
		struct ol_txrx_pdev_t *pdev = vdev->pdev; \
		uint8_t *dest_addr; \
		if (pdev->frame_format == wlan_frm_fmt_802_3) { \
			dest_addr = qdf_nbuf_data(msdu); \
		} else { /* 802.11 format */ \
			struct ieee80211_frame *frm; \
			frm = (struct ieee80211_frame *) qdf_nbuf_data(msdu); \
			if (vdev->opmode == wlan_op_mode_ap) { \
				dest_addr = (uint8_t *) &(frm->i_addr1[0]); \
			} else { \
				dest_addr = (uint8_t *) &(frm->i_addr3[0]); \
			} \
		} \
		if (qdf_unlikely(QDF_IS_ADDR_BROADCAST(dest_addr))) { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       bcast, msdu); \
		} else if (qdf_unlikely(IEEE80211_IS_MULTICAST(dest_addr))) { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       mcast, msdu); \
		} else { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       ucast, msdu); \
		} \
	} while (0)
#define OL_TX_PEER_STATS_UPDATE(peer, msdu) \
	OL_TXRX_PEER_STATS_UPDATE(peer, tx, msdu)
#define OL_RX_PEER_STATS_UPDATE(peer, msdu) \
	OL_TXRX_PEER_STATS_UPDATE(peer, rx, msdu)
#define OL_TXRX_PEER_STATS_MUTEX_INIT(pdev) \
	qdf_spinlock_create(&pdev->peer_stat_mutex)
#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev) \
	qdf_spinlock_destroy(&pdev->peer_stat_mutex)
#else
#define OL_TX_PEER_STATS_UPDATE(peer, msdu) /* no-op */
#define OL_RX_PEER_STATS_UPDATE(peer, msdu) /* no-op */
#define OL_TXRX_PEER_STATS_MUTEX_INIT(peer) /* no-op */
#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(peer) /* no-op */
#endif

#ifndef DEBUG_HTT_CREDIT
#define DEBUG_HTT_CREDIT 0
#endif

#if defined(FEATURE_TSO_DEBUG)
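/*
 * TSO debug statistics: the pdev stats keep an array of per-MSDU TSO
 * records (tso_msdu_info[], indexed via tso_msdu_idx), each holding the
 * segment count, current segment index, gso_size, total length, and
 * fragment count.  The helpers below access those records and wrap the
 * segment counters with NUM_MAX_TSO_SEGS_MASK.
 */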
#define TXRX_STATS_TSO_HISTOGRAM(_pdev, _p_cntrs) \
	do { \
		if (_p_cntrs == 1) { \
			TXRX_STATS_ADD(_pdev, pub.tx.tso.tso_hist.pkts_1, 1); \
		} else if (_p_cntrs >= 2 && _p_cntrs <= 5) { \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.tso.tso_hist.pkts_2_5, 1); \
		} else if (_p_cntrs > 5 && _p_cntrs <= 10) { \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.tso.tso_hist.pkts_6_10, 1); \
		} else if (_p_cntrs > 10 && _p_cntrs <= 15) { \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.tso.tso_hist.pkts_11_15, 1); \
		} else if (_p_cntrs > 15 && _p_cntrs <= 20) { \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.tso.tso_hist.pkts_16_20, 1); \
		} else if (_p_cntrs > 20) { \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.tso.tso_hist.pkts_20_plus, 1); \
		} \
	} while (0)

#define TXRX_STATS_TSO_RESET_MSDU(pdev, idx) \
	do { \
		pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg \
			= 0; \
		pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].tso_seg_idx \
			= 0; \
	} while (0)

#define TXRX_STATS_TSO_MSDU_IDX(pdev) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx

#define TXRX_STATS_TSO_MSDU(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx]

#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg

#define TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].gso_size

#define TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].total_len

#define TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].nr_frags

#define TXRX_STATS_TSO_CURR_MSDU(pdev, idx) \
	TXRX_STATS_TSO_MSDU(pdev, idx)

#define TXRX_STATS_TSO_SEG_IDX(pdev, idx) \
	TXRX_STATS_TSO_CURR_MSDU(pdev, idx).tso_seg_idx

#define TXRX_STATS_TSO_INC_SEG(pdev, idx) \
	do { \
		TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg++; \
		TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg &= \
			NUM_MAX_TSO_SEGS_MASK; \
	} while (0)

#define TXRX_STATS_TSO_RST_SEG(pdev, idx) \
	TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg = 0

#define TXRX_STATS_TSO_RST_SEG_IDX(pdev, idx) \
	TXRX_STATS_TSO_CURR_MSDU(pdev, idx).tso_seg_idx = 0

#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) \
	TXRX_STATS_TSO_MSDU(pdev, msdu_idx).tso_segs[seg_idx]

#define TXRX_STATS_TSO_CURR_SEG(pdev, idx) \
	TXRX_STATS_TSO_SEG(pdev, idx, \
			   TXRX_STATS_TSO_SEG_IDX(pdev, idx)) \

#define TXRX_STATS_TSO_INC_SEG_IDX(pdev, idx) \
	do { \
		TXRX_STATS_TSO_SEG_IDX(pdev, idx)++; \
		TXRX_STATS_TSO_SEG_IDX(pdev, idx) &= NUM_MAX_TSO_SEGS_MASK; \
	} while (0)

#define TXRX_STATS_TSO_SEG_UPDATE(pdev, idx, tso_seg) \
	(TXRX_STATS_TSO_CURR_SEG(pdev, idx) = tso_seg)

#define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, idx, size) \
	(TXRX_STATS_TSO_CURR_MSDU(pdev, idx).gso_size = size)

#define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, idx, len) \
	(TXRX_STATS_TSO_CURR_MSDU(pdev, idx).total_len = len)

#define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, idx, frags) \
	(TXRX_STATS_TSO_CURR_MSDU(pdev, idx).nr_frags = frags)

#else
#define TXRX_STATS_TSO_HISTOGRAM(_pdev, _p_cntrs) /* no-op */
#define TXRX_STATS_TSO_RESET_MSDU(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_MSDU_IDX(pdev) /* no-op */
#define TXRX_STATS_TSO_MSDU(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_CURR_MSDU(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_INC_MSDU_IDX(pdev) /* no-op */
#define TXRX_STATS_TSO_SEG_IDX(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) /* no-op */
#define TXRX_STATS_TSO_CURR_SEG(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_INC_SEG_IDX(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_SEG_UPDATE(pdev, idx, tso_seg) /* no-op */
#define TXRX_STATS_TSO_INC_SEG(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_RST_SEG(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_RST_SEG_IDX(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, idx, size) /* no-op */
#define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, idx, len) /* no-op */
#define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, idx, frags) /* no-op */
#define TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, idx) /* no-op */

#endif /* FEATURE_TSO_DEBUG */

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

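/**
 * ol_txrx_update_group_credit() - adjust the tx credit of a tx queue group
 * @group: tx queue group whose credit is updated
 * @credit: credit delta, or the new credit value when @absolute is set
 * @absolute: when non-zero, set the group credit rather than adding to it
 *
 * Note: this description is inferred from the parameter names; see the
 * function definition for the authoritative behavior.
 */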
void
ol_txrx_update_group_credit(
		struct ol_tx_queue_group_t *group,
		int32_t credit,
		u_int8_t absolute);
#endif

#endif /* _OL_TXRX_INTERNAL__H_ */