blob: a6fd30c7b40a02eb9def95d7330f3b4fa4b64e5f [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Nirav Shah99923a82018-06-23 14:35:49 +05302 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080019#ifndef _OL_TXRX_INTERNAL__H_
20#define _OL_TXRX_INTERNAL__H_
21
Anurag Chouhanc5548422016-02-24 18:33:27 +053022#include <qdf_util.h> /* qdf_assert */
Anurag Chouhanf04e84f2016-03-03 10:12:12 +053023#include <qdf_nbuf.h> /* qdf_nbuf_t */
Anurag Chouhan600c3a02016-03-01 10:33:54 +053024#include <qdf_mem.h> /* qdf_mem_set */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080025#include <cds_ieee80211_common.h> /* ieee80211_frame */
26#include <ol_htt_rx_api.h> /* htt_rx_msdu_desc_completes_mpdu, etc. */
27
28#include <ol_txrx_types.h>
29
30#include <ol_txrx_dbg.h>
31#include <enet.h> /* ETHERNET_HDR_LEN, etc. */
32#include <ipv4.h> /* IPV4_HDR_LEN, etc. */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080033#include <ip_prot.h> /* IP_PROTOCOL_TCP, etc. */
34
#ifdef ATH_11AC_TXCOMPACT
/*
 * Compact-descriptor builds keep no per-descriptor reference count:
 * OL_TX_DESC_NO_REFS always reports "no references remain" and the
 * init/inc operations compile away.
 */
#define OL_TX_DESC_NO_REFS(tx_desc) 1
#define OL_TX_DESC_REF_INIT(tx_desc)    /* no-op */
#define OL_TX_DESC_REF_INC(tx_desc)     /* no-op */
#else
/*
 * Full builds reference-count each tx descriptor atomically;
 * OL_TX_DESC_NO_REFS is true when the decrement drops the count to 0.
 */
#define OL_TX_DESC_NO_REFS(tx_desc) \
	qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
#define OL_TX_DESC_REF_INIT(tx_desc) qdf_atomic_init(&tx_desc->ref_cnt)
#define OL_TX_DESC_REF_INC(tx_desc) qdf_atomic_inc(&tx_desc->ref_cnt)
#endif
45
46#ifndef TXRX_ASSERT_LEVEL
47#define TXRX_ASSERT_LEVEL 3
48#endif
49
50#ifdef __KLOCWORK__
51#define TXRX_ASSERT1(x) do { if (!(x)) abort(); } while (0)
52#define TXRX_ASSERT2(x) do { if (!(x)) abort(); } while (0)
53#else /* #ifdef __KLOCWORK__ */
54
55#if TXRX_ASSERT_LEVEL > 0
Anurag Chouhanc5548422016-02-24 18:33:27 +053056#define TXRX_ASSERT1(condition) qdf_assert((condition))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080057#else
58#define TXRX_ASSERT1(condition)
59#endif
60
61#if TXRX_ASSERT_LEVEL > 1
Anurag Chouhanc5548422016-02-24 18:33:27 +053062#define TXRX_ASSERT2(condition) qdf_assert((condition))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080063#else
64#define TXRX_ASSERT2(condition)
65#endif
66#endif /* #ifdef __KLOCWORK__ */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080067
68#ifdef TXRX_PRINT_ENABLE
69
70#include <stdarg.h> /* va_list */
Anurag Chouhan6d760662016-02-20 16:05:43 +053071#include <qdf_types.h> /* qdf_vprint */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080072
Nirav Shah99923a82018-06-23 14:35:49 +053073#define ol_txrx_alert(params...) \
74 QDF_TRACE_FATAL(QDF_MODULE_ID_TXRX, params)
75#define ol_txrx_err(params...) \
76 QDF_TRACE_ERROR(QDF_MODULE_ID_TXRX, params)
77#define ol_txrx_warn(params...) \
78 QDF_TRACE_WARN(QDF_MODULE_ID_TXRX, params)
79#define ol_txrx_info(params...) \
80 QDF_TRACE_INFO(QDF_MODULE_ID_TXRX, params)
81#define ol_txrx_info_high(params...) \
82 QDF_TRACE_INFO(QDF_MODULE_ID_TXRX, params)
83#define ol_txrx_dbg(params...) \
84 QDF_TRACE_DEBUG(QDF_MODULE_ID_TXRX, params)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080085
Yun Park641304c2017-04-09 10:16:11 -070086/*
87 * define PN check failure message print rate
88 * as 1 second
89 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080090#define TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS 1000
91
92#else
Poddar, Siddarth14521792017-03-14 21:19:42 +053093#define ol_txrx_alert(format, args...)
94#define ol_txrx_err(format, args...)
95#define ol_txrx_warn(format, args...)
96#define ol_txrx_info(format, args...)
97#define ol_txrx_info_high(format, args...)
98#define ol_txrx_dbg(format, args...)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080099#endif /* TXRX_PRINT_ENABLE */
100
101/*--- tx credit debug printouts ---*/
102
103#ifndef DEBUG_CREDIT
104#define DEBUG_CREDIT 0
105#endif
106
107#if DEBUG_CREDIT
Anurag Chouhan6d760662016-02-20 16:05:43 +0530108#define TX_CREDIT_DEBUG_PRINT(fmt, ...) qdf_print(fmt, ## __VA_ARGS__)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800109#else
110#define TX_CREDIT_DEBUG_PRINT(fmt, ...)
111#endif
112
113/*--- tx scheduler debug printouts ---*/
114
115#ifdef HOST_TX_SCHED_DEBUG
Anurag Chouhan6d760662016-02-20 16:05:43 +0530116#define TX_SCHED_DEBUG_PRINT(fmt, ...) qdf_print(fmt, ## __VA_ARGS__)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800117#else
118#define TX_SCHED_DEBUG_PRINT(fmt, ...)
119#endif
Anurag Chouhan6d760662016-02-20 16:05:43 +0530120#define TX_SCHED_DEBUG_PRINT_ALWAYS(fmt, ...) qdf_print(fmt, ## __VA_ARGS__)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800121
/*
 * Append one netbuf to a head/tail-tracked singly-linked netbuf list.
 * An empty list is detected via a NULL head; otherwise the element is
 * linked after the current tail.  The caller's tail is always advanced.
 */
#define OL_TXRX_LIST_APPEND(head, tail, elem) \
	do { \
		if (!(head)) { \
			(head) = (elem); \
		} else { \
			qdf_nbuf_set_next((tail), (elem)); \
		} \
		(tail) = (elem); \
	} while (0)
131
132static inline void
133ol_rx_mpdu_list_next(struct ol_txrx_pdev_t *pdev,
134 void *mpdu_list,
Nirav Shahcbc6d722016-03-01 16:24:53 +0530135 qdf_nbuf_t *mpdu_tail, qdf_nbuf_t *next_mpdu)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800136{
137 htt_pdev_handle htt_pdev = pdev->htt_pdev;
Nirav Shahcbc6d722016-03-01 16:24:53 +0530138 qdf_nbuf_t msdu;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800139
140 /*
141 * For now, we use a simply flat list of MSDUs.
142 * So, traverse the list until we reach the last MSDU within the MPDU.
143 */
144 TXRX_ASSERT2(mpdu_list);
145 msdu = mpdu_list;
146 while (!htt_rx_msdu_desc_completes_mpdu
147 (htt_pdev, htt_rx_msdu_desc_retrieve(htt_pdev, msdu))) {
Nirav Shahcbc6d722016-03-01 16:24:53 +0530148 msdu = qdf_nbuf_next(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800149 TXRX_ASSERT2(msdu);
150 }
151 /* msdu now points to the last MSDU within the first MPDU */
152 *mpdu_tail = msdu;
Nirav Shahcbc6d722016-03-01 16:24:53 +0530153 *next_mpdu = qdf_nbuf_next(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800154}
155
156/*--- txrx stats macros ---*/
157
158/* unconditional defs */
159#define TXRX_STATS_INCR(pdev, field) TXRX_STATS_ADD(pdev, field, 1)
160
161/* default conditional defs (may be undefed below) */
162
/* Zero every counter in the pdev stats structure. */
#define TXRX_STATS_INIT(_pdev) \
	qdf_mem_set(&((_pdev)->stats), sizeof((_pdev)->stats), 0x0)
/*
 * Add _delta to one pdev stats counter.  Wrapped in do/while (0) so the
 * macro expands to exactly one statement: the previous "{ ... }" form
 * left a dangling ';' after the block, which is a syntax error in an
 * unbraced if/else body.
 */
#define TXRX_STATS_ADD(_pdev, _field, _delta) \
	do { \
		_pdev->stats._field += _delta; \
	} while (0)
/* Account one MSDU into a pkts/bytes counter pair. */
#define TXRX_STATS_MSDU_INCR(pdev, field, netbuf) \
	do { \
		TXRX_STATS_INCR((pdev), pub.field.pkts); \
		TXRX_STATS_ADD((pdev), pub.field.bytes, qdf_nbuf_len(netbuf)); \
	} while (0)
172
173/* conditional defs based on verbosity level */
174
175
/*
 * Walk an entire netbuf list and account every MSDU on it
 * (packet count + byte length) into the given stats field.
 */
#define TXRX_STATS_MSDU_LIST_INCR(pdev, field, netbuf_list) \
	do { \
		qdf_nbuf_t tmp_list = netbuf_list; \
		while (tmp_list) { \
			TXRX_STATS_MSDU_INCR(pdev, field, tmp_list); \
			tmp_list = qdf_nbuf_next(tmp_list); \
		} \
	} while (0)
184
/*
 * Map a tx completion status onto the matching delivered/dropped MSDU
 * counter.  Statuses other than the four listed are deliberately not
 * counted (the trailing else is an explicit no-op).
 */
#define TXRX_STATS_MSDU_INCR_TX_STATUS(status, pdev, netbuf) do { \
		if (status == htt_tx_status_ok) \
			TXRX_STATS_MSDU_INCR(pdev, tx.delivered, netbuf); \
		else if (status == htt_tx_status_discard) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.target_discard, \
					     netbuf); \
		else if (status == htt_tx_status_no_ack) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.no_ack, netbuf); \
		else if (status == htt_tx_status_download_fail) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.download_fail, \
					     netbuf); \
		else \
			/* NO-OP */; \
	} while (0)
199
/*
 * Bucket a per-interrupt tx completion count into the completion
 * histogram.  The second bucket tests ">= 2" so that a count of
 * exactly 2 lands in pkts_2_10; the original "> 2" test skipped 2
 * entirely, misfiling it in the pkts_61_plus catch-all.
 */
#define TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs) \
	do { \
		if (_p_cntrs == 1) { \
			TXRX_STATS_ADD(_pdev, pub.tx.comp_histogram.pkts_1, 1);\
		} else if (_p_cntrs >= 2 && _p_cntrs <= 10) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_2_10, 1); \
		} else if (_p_cntrs > 10 && _p_cntrs <= 20) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_11_20, 1); \
		} else if (_p_cntrs > 20 && _p_cntrs <= 30) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_21_30, 1); \
		} else if (_p_cntrs > 30 && _p_cntrs <= 40) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_31_40, 1); \
		} else if (_p_cntrs > 40 && _p_cntrs <= 50) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_41_50, 1); \
		} else if (_p_cntrs > 50 && _p_cntrs <= 60) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_51_60, 1); \
		} else { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_61_plus, 1); \
		} \
	} while (0)
227
/*
 * Bulk-update delivered/dropped packet and byte counters for a batch of
 * tx completions, then fold the packet count into the completion
 * histogram.  The switch now uses the macro's own _status parameter;
 * the original body switched on a bare "status", silently capturing a
 * caller-local variable of that name.
 */
#define TXRX_STATS_UPDATE_TX_STATS(_pdev, _status, _p_cntrs, _b_cntrs) \
	do { \
		switch (_status) { \
		case htt_tx_status_ok: \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.delivered.pkts, _p_cntrs); \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.delivered.bytes, _b_cntrs); \
			break; \
		case htt_tx_status_discard: \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.dropped.target_discard.pkts, _p_cntrs);\
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.dropped.target_discard.bytes, _b_cntrs);\
			break; \
		case htt_tx_status_no_ack: \
			TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.pkts, \
				       _p_cntrs); \
			TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.bytes, \
				       _b_cntrs); \
			break; \
		case htt_tx_status_download_fail: \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.dropped.download_fail.pkts, _p_cntrs); \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.dropped.download_fail.bytes, _b_cntrs);\
			break; \
		default: \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.dropped.others.pkts, _p_cntrs); \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.dropped.others.bytes, _b_cntrs); \
			break; \
		} \
		TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs); \
	} while (0)
264
265
266/*--- txrx sequence number trace macros ---*/
267
/*
 * Encode an error status in the sequence-number field.  The argument
 * is parenthesized so expression arguments subtract as a whole
 * (0xffff - _status would turn "a + b" into "0xffff - a + b").
 */
#define TXRX_SEQ_NUM_ERR(_status) (0xffff - (_status))
269
270#if defined(ENABLE_RX_REORDER_TRACE)
271
272A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev);
273void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev);
274void ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
275 uint8_t tid,
276 uint16_t reorder_idx,
277 uint16_t seq_num, int num_mpdus);
278
279#define OL_RX_REORDER_TRACE_ATTACH ol_rx_reorder_trace_attach
280#define OL_RX_REORDER_TRACE_DETACH ol_rx_reorder_trace_detach
281#define OL_RX_REORDER_TRACE_ADD ol_rx_reorder_trace_add
282
283#else
284
285#define OL_RX_REORDER_TRACE_ATTACH(_pdev) A_OK
286#define OL_RX_REORDER_TRACE_DETACH(_pdev)
287#define OL_RX_REORDER_TRACE_ADD(pdev, tid, reorder_idx, seq_num, num_mpdus)
288
289#endif /* ENABLE_RX_REORDER_TRACE */
290
291/*--- txrx packet number trace macros ---*/
292
293#if defined(ENABLE_RX_PN_TRACE)
294
295A_STATUS ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev);
296void ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev);
297void ol_rx_pn_trace_add(struct ol_txrx_pdev_t *pdev,
298 struct ol_txrx_peer_t *peer,
299 uint16_t tid, void *rx_desc);
300
301#define OL_RX_PN_TRACE_ATTACH ol_rx_pn_trace_attach
302#define OL_RX_PN_TRACE_DETACH ol_rx_pn_trace_detach
303#define OL_RX_PN_TRACE_ADD ol_rx_pn_trace_add
304
305#else
306
307#define OL_RX_PN_TRACE_ATTACH(_pdev) A_OK
308#define OL_RX_PN_TRACE_DETACH(_pdev)
309#define OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc)
310
311#endif /* ENABLE_RX_PN_TRACE */
312
313static inline int ol_txrx_ieee80211_hdrsize(const void *data)
314{
315 const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
316 int size = sizeof(struct ieee80211_frame);
317
318 /* NB: we don't handle control frames */
319 TXRX_ASSERT1((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
320 IEEE80211_FC0_TYPE_CTL);
321 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
322 IEEE80211_FC1_DIR_DSTODS)
323 size += IEEE80211_ADDR_LEN;
324 if (IEEE80211_QOS_HAS_SEQ(wh)) {
325 size += sizeof(uint16_t);
326 /* Qos frame with Order bit set indicates an HTC frame */
327 if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
328 size += sizeof(struct ieee80211_htc);
329 }
330 return size;
331}
332
333/*--- frame display utility ---*/
334
335enum ol_txrx_frm_dump_options {
336 ol_txrx_frm_dump_contents = 0x1,
337 ol_txrx_frm_dump_tcp_seq = 0x2,
338};
339
340#ifdef TXRX_DEBUG_DATA
341static inline void
342ol_txrx_frms_dump(const char *name,
343 struct ol_txrx_pdev_t *pdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +0530344 qdf_nbuf_t frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800345 enum ol_txrx_frm_dump_options display_options, int max_len)
346{
347#define TXRX_FRM_DUMP_MAX_LEN 128
348 uint8_t local_buf[TXRX_FRM_DUMP_MAX_LEN] = { 0 };
349 uint8_t *p;
350
351 if (name) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530352 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, "%s\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800353 name);
354 }
355 while (frm) {
Nirav Shahcbc6d722016-03-01 16:24:53 +0530356 p = qdf_nbuf_data(frm);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800357 if (display_options & ol_txrx_frm_dump_tcp_seq) {
358 int tcp_offset;
359 int l2_hdr_size;
360 uint16_t ethtype;
361 uint8_t ip_prot;
362
363 if (pdev->frame_format == wlan_frm_fmt_802_3) {
364 struct ethernet_hdr_t *enet_hdr =
365 (struct ethernet_hdr_t *)p;
366 l2_hdr_size = ETHERNET_HDR_LEN;
367
368 /*
369 * LLC/SNAP present?
370 */
371 ethtype = (enet_hdr->ethertype[0] << 8) |
372 enet_hdr->ethertype[1];
373 if (!IS_ETHERTYPE(ethertype)) {
374 /* 802.3 format */
375 struct llc_snap_hdr_t *llc_hdr;
376
377 llc_hdr = (struct llc_snap_hdr_t *)
378 (p + l2_hdr_size);
379 l2_hdr_size += LLC_SNAP_HDR_LEN;
380 ethtype = (llc_hdr->ethertype[0] << 8) |
381 llc_hdr->ethertype[1];
382 }
383 } else {
384 struct llc_snap_hdr_t *llc_hdr;
Yun Park641304c2017-04-09 10:16:11 -0700385
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800386 /* (generic?) 802.11 */
387 l2_hdr_size = sizeof(struct ieee80211_frame);
388 llc_hdr = (struct llc_snap_hdr_t *)
389 (p + l2_hdr_size);
390 l2_hdr_size += LLC_SNAP_HDR_LEN;
391 ethtype = (llc_hdr->ethertype[0] << 8) |
392 llc_hdr->ethertype[1];
393 }
394 if (ethtype == ETHERTYPE_IPV4) {
395 struct ipv4_hdr_t *ipv4_hdr;
Yun Park641304c2017-04-09 10:16:11 -0700396
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800397 ipv4_hdr =
398 (struct ipv4_hdr_t *)(p + l2_hdr_size);
399 ip_prot = ipv4_hdr->protocol;
400 tcp_offset = l2_hdr_size + IPV4_HDR_LEN;
401 } else if (ethtype == ETHERTYPE_IPV6) {
402 struct ipv6_hdr_t *ipv6_hdr;
Yun Park641304c2017-04-09 10:16:11 -0700403
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800404 ipv6_hdr =
405 (struct ipv6_hdr_t *)(p + l2_hdr_size);
406 ip_prot = ipv6_hdr->next_hdr;
407 tcp_offset = l2_hdr_size + IPV6_HDR_LEN;
408 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530409 QDF_TRACE(QDF_MODULE_ID_TXRX,
410 QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -0700411 "frame %pK non-IP ethertype (%x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800412 frm, ethtype);
413 goto NOT_IP_TCP;
414 }
415 if (ip_prot == IP_PROTOCOL_TCP) {
416#if NEVERDEFINED
417 struct tcp_hdr_t *tcp_hdr;
418 uint32_t tcp_seq_num;
Yun Park641304c2017-04-09 10:16:11 -0700419
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800420 tcp_hdr = (struct tcp_hdr_t *)(p + tcp_offset);
421 tcp_seq_num =
422 (tcp_hdr->seq_num[0] << 24) |
423 (tcp_hdr->seq_num[1] << 16) |
424 (tcp_hdr->seq_num[1] << 8) |
425 (tcp_hdr->seq_num[1] << 0);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530426 QDF_TRACE(QDF_MODULE_ID_TXRX,
427 QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -0700428 "frame %pK: TCP seq num = %d\n", frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800429 tcp_seq_num);
430#else
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530431 QDF_TRACE(QDF_MODULE_ID_TXRX,
432 QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -0700433 "frame %pK: TCP seq num = %d\n", frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800434 ((*(p + tcp_offset + 4)) << 24) |
435 ((*(p + tcp_offset + 5)) << 16) |
436 ((*(p + tcp_offset + 6)) << 8) |
437 (*(p + tcp_offset + 7)));
438#endif
439 } else {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530440 QDF_TRACE(QDF_MODULE_ID_TXRX,
441 QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -0700442 "frame %pK non-TCP IP protocol (%x)\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800443 frm, ip_prot);
444 }
445 }
446NOT_IP_TCP:
447 if (display_options & ol_txrx_frm_dump_contents) {
448 int i, frag_num, len_lim;
Yun Park641304c2017-04-09 10:16:11 -0700449
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800450 len_lim = max_len;
Nirav Shahcbc6d722016-03-01 16:24:53 +0530451 if (len_lim > qdf_nbuf_len(frm))
452 len_lim = qdf_nbuf_len(frm);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800453 if (len_lim > TXRX_FRM_DUMP_MAX_LEN)
454 len_lim = TXRX_FRM_DUMP_MAX_LEN;
455
456 /*
457 * Gather frame contents from netbuf fragments
458 * into a contiguous buffer.
459 */
460 frag_num = 0;
461 i = 0;
462 while (i < len_lim) {
463 int frag_bytes;
Yun Park641304c2017-04-09 10:16:11 -0700464
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800465 frag_bytes =
Nirav Shahcbc6d722016-03-01 16:24:53 +0530466 qdf_nbuf_get_frag_len(frm, frag_num);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800467 if (frag_bytes > len_lim - i)
468 frag_bytes = len_lim - i;
469 if (frag_bytes > 0) {
Nirav Shahcbc6d722016-03-01 16:24:53 +0530470 p = qdf_nbuf_get_frag_vaddr(frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800471 frag_num);
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530472 qdf_mem_copy(&local_buf[i], p,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800473 frag_bytes);
474 }
475 frag_num++;
476 i += frag_bytes;
477 }
478
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530479 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Jeff Johnsonc13bfe02017-09-18 08:16:17 -0700480 "frame %pK data (%pK), hex dump of bytes 0-%d of %d:\n",
Yun Park641304c2017-04-09 10:16:11 -0700481 frm, p, len_lim - 1, (int)qdf_nbuf_len(frm));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800482 p = local_buf;
483 while (len_lim > 16) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530484 QDF_TRACE(QDF_MODULE_ID_TXRX,
485 QDF_TRACE_LEVEL_INFO,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800486 " " /* indent */
487 "%02x %02x %02x %02x %02x %02x %02x %02x "
488 "%02x %02x %02x %02x %02x %02x %02x %02x\n",
489 *(p + 0), *(p + 1), *(p + 2),
490 *(p + 3), *(p + 4), *(p + 5),
491 *(p + 6), *(p + 7), *(p + 8),
492 *(p + 9), *(p + 10), *(p + 11),
493 *(p + 12), *(p + 13), *(p + 14),
494 *(p + 15));
495 p += 16;
496 len_lim -= 16;
497 }
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530498 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800499 " " /* indent */);
500 while (len_lim > 0) {
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530501 QDF_TRACE(QDF_MODULE_ID_TXRX,
502 QDF_TRACE_LEVEL_INFO, "%02x ", *p);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800503 p++;
504 len_lim--;
505 }
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530506 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800507 "\n");
508 }
Nirav Shahcbc6d722016-03-01 16:24:53 +0530509 frm = qdf_nbuf_next(frm);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800510 }
511}
512#else
513#define ol_txrx_frms_dump(name, pdev, frms, display_options, max_len)
514#endif /* TXRX_DEBUG_DATA */
515
516#ifdef SUPPORT_HOST_STATISTICS
517
/* Forward an rx error event to the control-path statistics handler. */
#define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast) \
	ol_rx_err_statistics(pdev->ctrl_pdev, vdev->vdev_id, err_type, \
			     sec_type, is_mcast)

/*
 * Determine whether the frame is multicast and look up the peer's
 * matching (mcast/ucast) security type, then report the rx error.
 */
#define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type) \
	do { \
		int is_mcast; \
		enum htt_sec_type sec_type; \
		is_mcast = htt_rx_msdu_is_wlan_mcast( \
			pdev->htt_pdev, rx_desc); \
		sec_type = peer->security[is_mcast \
					  ? txrx_sec_mcast \
					  : txrx_sec_ucast].sec_type; \
		OL_RX_ERR_STATISTICS(pdev, vdev, err_type, \
				     pdev->sec_types[sec_type], \
				     is_mcast); \
	} while (false)
535
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530536#ifdef CONFIG_HL_SUPPORT
537
/**
 * ol_rx_err_inv_get_wifi_header() - retrieve wifi header
 * @pdev: handle to the physical device
 * @rx_msdu: msdu of which header needs to be retrieved
 *
 * HL build: the 802.11 header is not available to the host here, so
 * the unknown-peer error path receives a NULL header.
 *
 * Return: NULL (always)
 */
static inline
struct ieee80211_frame *ol_rx_err_inv_get_wifi_header(
	struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu)
{
	return NULL;
}
551#else
552
/**
 * ol_rx_err_inv_get_wifi_header() - retrieve wifi header
 * @pdev: handle to the physical device
 * @rx_msdu: msdu of which header needs to be retrieved
 *
 * Return: pointer to the frame's 802.11 header when the configured
 * frame type is native wifi, otherwise NULL
 */
static inline
struct ieee80211_frame *ol_rx_err_inv_get_wifi_header(
	struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu)
{
	struct ieee80211_frame *wh = NULL;

	if (ol_cfg_frame_type(pdev) == wlan_frm_fmt_native_wifi)
		/* For windows, it is always native wifi header .*/
		wh = (struct ieee80211_frame *)qdf_nbuf_data(rx_msdu);

	return wh;
}
565#endif
566
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800567#define OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu) \
568 do { \
569 struct ieee80211_frame *wh = NULL; \
570 /*FIX THIS : */ \
571 /* Here htt_rx_mpdu_wifi_hdr_retrieve should be used. */ \
572 /*But at present it seems it does not work.*/ \
573 /*wh = (struct ieee80211_frame *) */ \
574 /*htt_rx_mpdu_wifi_hdr_retrieve(pdev->htt_pdev, rx_desc);*/ \
575 /* this only apply to LL device.*/ \
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530576 wh = ol_rx_err_inv_get_wifi_header(pdev->ctrl_pdev, rx_msdu); \
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800577 ol_rx_err_inv_peer_statistics(pdev->ctrl_pdev, \
578 wh, OL_RX_ERR_UNKNOWN_PEER); \
579 } while (false)
580
/*
 * Classify an htt rx status into an ol_rx_err_type, then report it:
 * per-peer statistics when vdev and peer are known, otherwise the
 * unknown-peer path.  The inner call now forwards the macro's own
 * rx_desc parameter; the original passed a bare "rx_mpdu_desc",
 * silently capturing a caller-local variable of that name.
 */
#define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status) \
	do { \
		enum ol_rx_err_type err_type = OL_RX_ERR_NONE; \
		if (rx_status == htt_rx_status_decrypt_err) \
			err_type = OL_RX_ERR_DECRYPT; \
		else if (rx_status == htt_rx_status_tkip_mic_err) \
			err_type = OL_RX_ERR_TKIP_MIC; \
		else if (rx_status == htt_rx_status_mpdu_length_err) \
			err_type = OL_RX_ERR_MPDU_LENGTH; \
		else if (rx_status == htt_rx_status_mpdu_encrypt_required_err) \
			err_type = OL_RX_ERR_ENCRYPT_REQUIRED; \
		else if (rx_status == htt_rx_status_err_dup) \
			err_type = OL_RX_ERR_DUP; \
		else if (rx_status == htt_rx_status_err_fcs) \
			err_type = OL_RX_ERR_FCS; \
		else \
			err_type = OL_RX_ERR_UNKNOWN; \
		\
		if (vdev != NULL && peer != NULL) { \
			OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, \
					       rx_desc, err_type); \
		} else { \
			OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu); \
		} \
	} while (false)
606#else
607#define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast)
608#define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type)
609#define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status)
610#endif /* SUPPORT_HOST_STATISTICS */
611
#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
/*
 * Account one MSDU (frame count + byte length) against a peer's tx or
 * rx stats bucket, under the pdev peer-stat spinlock.
 */
#define OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, type, msdu) \
	do { \
		qdf_spin_lock_bh(&peer->vdev->pdev->peer_stat_mutex); \
		peer->stats.tx_or_rx.frms.type += 1; \
		peer->stats.tx_or_rx.bytes.type += qdf_nbuf_len(msdu); \
		qdf_spin_unlock_bh(&peer->vdev->pdev->peer_stat_mutex); \
	} while (0)
/*
 * Locate the destination MAC address of the MSDU (802.3 header, or
 * addr1/addr3 of an 802.11 header depending on AP/STA mode), classify
 * it as broadcast/multicast/unicast, and account the MSDU in the
 * matching bucket.
 */
#define OL_TXRX_PEER_STATS_UPDATE(peer, tx_or_rx, msdu) \
	do { \
		struct ol_txrx_vdev_t *vdev = peer->vdev; \
		struct ol_txrx_pdev_t *pdev = vdev->pdev; \
		uint8_t *dest_addr; \
		if (pdev->frame_format == wlan_frm_fmt_802_3) { \
			dest_addr = qdf_nbuf_data(msdu); \
		} else { /* 802.11 format */ \
			struct ieee80211_frame *frm; \
			frm = (struct ieee80211_frame *) qdf_nbuf_data(msdu); \
			if (vdev->opmode == wlan_op_mode_ap) { \
				dest_addr = (uint8_t *) &(frm->i_addr1[0]); \
			} else { \
				dest_addr = (uint8_t *) &(frm->i_addr3[0]); \
			} \
		} \
		if (qdf_unlikely(IEEE80211_IS_BROADCAST(dest_addr))) { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       bcast, msdu); \
		} else if (qdf_unlikely(IEEE80211_IS_MULTICAST(dest_addr))) { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       mcast, msdu); \
		} else { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       ucast, msdu); \
		} \
	} while (0)
#define OL_TX_PEER_STATS_UPDATE(peer, msdu) \
	OL_TXRX_PEER_STATS_UPDATE(peer, tx, msdu)
#define OL_RX_PEER_STATS_UPDATE(peer, msdu) \
	OL_TXRX_PEER_STATS_UPDATE(peer, rx, msdu)
#define OL_TXRX_PEER_STATS_MUTEX_INIT(pdev) \
	qdf_spinlock_create(&pdev->peer_stat_mutex)
#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev) \
	qdf_spinlock_destroy(&pdev->peer_stat_mutex)
#else
#define OL_TX_PEER_STATS_UPDATE(peer, msdu)     /* no-op */
#define OL_RX_PEER_STATS_UPDATE(peer, msdu)     /* no-op */
#define OL_TXRX_PEER_STATS_MUTEX_INIT(peer)     /* no-op */
#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(peer)  /* no-op */
#endif
661
662#ifndef DEBUG_HTT_CREDIT
663#define DEBUG_HTT_CREDIT 0
664#endif
665
666#if defined(FEATURE_TSO_DEBUG)
Nirav Shahda008342016-05-17 18:50:40 +0530667#define TXRX_STATS_TSO_HISTOGRAM(_pdev, _p_cntrs) \
668 do { \
669 if (_p_cntrs == 1) { \
670 TXRX_STATS_ADD(_pdev, pub.tx.tso.tso_hist.pkts_1, 1); \
Yun Park641304c2017-04-09 10:16:11 -0700671 } else if (_p_cntrs >= 2 && _p_cntrs <= 5) { \
Nirav Shahda008342016-05-17 18:50:40 +0530672 TXRX_STATS_ADD(_pdev, \
673 pub.tx.tso.tso_hist.pkts_2_5, 1); \
674 } else if (_p_cntrs > 5 && _p_cntrs <= 10) { \
675 TXRX_STATS_ADD(_pdev, \
676 pub.tx.tso.tso_hist.pkts_6_10, 1); \
677 } else if (_p_cntrs > 10 && _p_cntrs <= 15) { \
678 TXRX_STATS_ADD(_pdev, \
679 pub.tx.tso.tso_hist.pkts_11_15, 1); \
680 } else if (_p_cntrs > 15 && _p_cntrs <= 20) { \
681 TXRX_STATS_ADD(_pdev, \
682 pub.tx.tso.tso_hist.pkts_16_20, 1); \
683 } else if (_p_cntrs > 20) { \
684 TXRX_STATS_ADD(_pdev, \
685 pub.tx.tso.tso_hist.pkts_20_plus, 1); \
686 } \
687 } while (0)
688
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530689#define TXRX_STATS_TSO_RESET_MSDU(pdev, idx) \
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800690 do { \
Yun Park641304c2017-04-09 10:16:11 -0700691 pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg \
692 = 0; \
693 pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].tso_seg_idx \
694 = 0; \
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800695 } while (0)
696
697#define TXRX_STATS_TSO_MSDU_IDX(pdev) \
698 pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx
699
700#define TXRX_STATS_TSO_MSDU(pdev, idx) \
701 pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx]
702
703#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) \
704 pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg
705
Nirav Shahda008342016-05-17 18:50:40 +0530706#define TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, idx) \
707 pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].gso_size
708
709#define TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, idx) \
710 pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].total_len
711
712#define TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, idx) \
713 pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].nr_frags
714
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530715#define TXRX_STATS_TSO_CURR_MSDU(pdev, idx) \
716 TXRX_STATS_TSO_MSDU(pdev, idx)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800717
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530718#define TXRX_STATS_TSO_SEG_IDX(pdev, idx) \
719 TXRX_STATS_TSO_CURR_MSDU(pdev, idx).tso_seg_idx
720
721#define TXRX_STATS_TSO_INC_SEG(pdev, idx) \
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800722 do { \
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530723 TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg++; \
724 TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg &= \
Nirav Shahda008342016-05-17 18:50:40 +0530725 NUM_MAX_TSO_SEGS_MASK; \
726 } while (0)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800727
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530728#define TXRX_STATS_TSO_RST_SEG(pdev, idx) \
729 TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg = 0
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800730
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530731#define TXRX_STATS_TSO_RST_SEG_IDX(pdev, idx) \
732 TXRX_STATS_TSO_CURR_MSDU(pdev, idx).tso_seg_idx = 0
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800733
734#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) \
735 TXRX_STATS_TSO_MSDU(pdev, msdu_idx).tso_segs[seg_idx]
736
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530737#define TXRX_STATS_TSO_CURR_SEG(pdev, idx) \
738 TXRX_STATS_TSO_SEG(pdev, idx, \
739 TXRX_STATS_TSO_SEG_IDX(pdev, idx)) \
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800740
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530741#define TXRX_STATS_TSO_INC_SEG_IDX(pdev, idx) \
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800742 do { \
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530743 TXRX_STATS_TSO_SEG_IDX(pdev, idx)++; \
744 TXRX_STATS_TSO_SEG_IDX(pdev, idx) &= NUM_MAX_TSO_SEGS_MASK; \
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800745 } while (0)
746
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530747#define TXRX_STATS_TSO_SEG_UPDATE(pdev, idx, tso_seg) \
748 (TXRX_STATS_TSO_CURR_SEG(pdev, idx) = tso_seg)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800749
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530750#define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, idx, size) \
751 (TXRX_STATS_TSO_CURR_MSDU(pdev, idx).gso_size = size)
Nirav Shahda008342016-05-17 18:50:40 +0530752
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530753#define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, idx, len) \
754 (TXRX_STATS_TSO_CURR_MSDU(pdev, idx).total_len = len)
Nirav Shahda008342016-05-17 18:50:40 +0530755
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530756#define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, idx, frags) \
757 (TXRX_STATS_TSO_CURR_MSDU(pdev, idx).nr_frags = frags)
Nirav Shahda008342016-05-17 18:50:40 +0530758
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800759#else
Nirav Shahda008342016-05-17 18:50:40 +0530760#define TXRX_STATS_TSO_HISTOGRAM(_pdev, _p_cntrs) /* no-op */
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530761#define TXRX_STATS_TSO_RESET_MSDU(pdev, idx) /* no-op */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800762#define TXRX_STATS_TSO_MSDU_IDX(pdev) /* no-op */
763#define TXRX_STATS_TSO_MSDU(pdev, idx) /* no-op */
764#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) /* no-op */
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530765#define TXRX_STATS_TSO_CURR_MSDU(pdev, idx) /* no-op */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800766#define TXRX_STATS_TSO_INC_MSDU_IDX(pdev) /* no-op */
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530767#define TXRX_STATS_TSO_SEG_IDX(pdev, idx) /* no-op */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800768#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) /* no-op */
Himanshu Agarwal5501c192017-02-14 11:39:39 +0530769#define TXRX_STATS_TSO_CURR_SEG(pdev, idx) /* no-op */
770#define TXRX_STATS_TSO_INC_SEG_IDX(pdev, idx) /* no-op */
771#define TXRX_STATS_TSO_SEG_UPDATE(pdev, idx, tso_seg) /* no-op */
772#define TXRX_STATS_TSO_INC_SEG(pdev, idx) /* no-op */
773#define TXRX_STATS_TSO_RST_SEG(pdev, idx) /* no-op */
774#define TXRX_STATS_TSO_RST_SEG_IDX(pdev, idx) /* no-op */
775#define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, idx, size) /* no-op */
776#define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, idx, len) /* no-op */
777#define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, idx, frags) /* no-op */
Nirav Shahda008342016-05-17 18:50:40 +0530778#define TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, idx) /* no-op */
779#define TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, idx) /* no-op */
780#define TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, idx) /* no-op */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800781
782#endif /* FEATURE_TSO_DEBUG */
783
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530784#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
785
786void
787ol_txrx_update_group_credit(
788 struct ol_tx_queue_group_t *group,
789 int32_t credit,
790 u_int8_t absolute);
791#endif
792
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800793#endif /* _OL_TXRX_INTERNAL__H_ */