/*
 * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
27
28#ifndef _OL_TXRX_INTERNAL__H_
29#define _OL_TXRX_INTERNAL__H_
30
31#include <cdf_util.h> /* cdf_assert */
32#include <cdf_nbuf.h> /* cdf_nbuf_t */
33#include <cdf_memory.h> /* cdf_mem_set */
34#include <cds_ieee80211_common.h> /* ieee80211_frame */
35#include <ol_htt_rx_api.h> /* htt_rx_msdu_desc_completes_mpdu, etc. */
36
37#include <ol_txrx_types.h>
38
39#include <ol_txrx_dbg.h>
40#include <enet.h> /* ETHERNET_HDR_LEN, etc. */
41#include <ipv4.h> /* IPV4_HDR_LEN, etc. */
42#include <ipv6.h> /* IPV6_HDR_LEN, etc. */
43#include <ip_prot.h> /* IP_PROTOCOL_TCP, etc. */
44
#ifdef ATH_11AC_TXCOMPACT
/*
 * Tx-compact scheme: each tx descriptor has exactly one implicit
 * reference, so refcount maintenance collapses to no-ops and
 * "no refs remain" is unconditionally true at completion time.
 */
#define OL_TX_DESC_NO_REFS(tx_desc) 1
#define OL_TX_DESC_REF_INIT(tx_desc)    /* no-op */
#define OL_TX_DESC_REF_INC(tx_desc)     /* no-op */
#else
/* Atomically drop one reference; non-zero when the last ref was released */
#define OL_TX_DESC_NO_REFS(tx_desc) \
	cdf_atomic_dec_and_test(&tx_desc->ref_cnt)
#define OL_TX_DESC_REF_INIT(tx_desc) cdf_atomic_init(&tx_desc->ref_cnt)
#define OL_TX_DESC_REF_INC(tx_desc) cdf_atomic_inc(&tx_desc->ref_cnt)
#endif
55
/* Default assert level: enables both TXRX_ASSERT1 and TXRX_ASSERT2 */
#ifndef TXRX_ASSERT_LEVEL
#define TXRX_ASSERT_LEVEL 3
#endif

#ifdef __KLOCWORK__
/* Klocwork static analysis build: make assert failures visibly terminal */
#define TXRX_ASSERT1(x) do { if (!(x)) abort(); } while (0)
#define TXRX_ASSERT2(x) do { if (!(x)) abort(); } while (0)
#else /* #ifdef __KLOCWORK__ */

/* ASSERT1: cheap sanity checks, active when assert level >= 1 */
#if TXRX_ASSERT_LEVEL > 0
#define TXRX_ASSERT1(condition) cdf_assert((condition))
#else
#define TXRX_ASSERT1(condition)
#endif

/* ASSERT2: more expensive checks, active when assert level >= 2 */
#if TXRX_ASSERT_LEVEL > 1
#define TXRX_ASSERT2(condition) cdf_assert((condition))
#else
#define TXRX_ASSERT2(condition)
#endif
#endif /* #ifdef __KLOCWORK__ */
/*
 * Print verbosity levels, compared against g_txrx_print_level by the
 * TXRX_PRINT macros below; lower values are more severe.
 */
enum {
	/* FATAL_ERR - print only irrecoverable error messages */
	TXRX_PRINT_LEVEL_FATAL_ERR,

	/* ERR - include non-fatal err messages */
	TXRX_PRINT_LEVEL_ERR,

	/* WARN - include warnings */
	TXRX_PRINT_LEVEL_WARN,

	/* INFO1 - include fundamental, infrequent events */
	TXRX_PRINT_LEVEL_INFO1,

	/* INFO2 - include non-fundamental but infrequent events */
	TXRX_PRINT_LEVEL_INFO2,

	/* INFO3 - include frequent events */
	/* to avoid performance impact, don't use INFO3
	   unless explicitly enabled */
#ifdef TXRX_PRINT_VERBOSE_ENABLE
	TXRX_PRINT_LEVEL_INFO3,
#endif /* TXRX_PRINT_VERBOSE_ENABLE */
};

/* Runtime-adjustable print threshold; defined in the txrx .c code */
extern unsigned g_txrx_print_level;
102
#ifdef TXRX_PRINT_ENABLE

#include <stdarg.h>             /* va_list */
#include <cdf_types.h>          /* cdf_vprint */

/* Suppress 4296 - expression is always true
 * It will fire if level is TXRX_PRINT_LEVEL_FATAL_ERR (0)
 * because g_txrx_print_level is unsigned */
#define ol_txrx_print(level, fmt, ...) { \
	if (level <= g_txrx_print_level) \
		cdf_print(fmt, ## __VA_ARGS__); }
/* Print at the given verbosity level, tagged with a "TXRX: " prefix */
#define TXRX_PRINT(level, fmt, ...) \
	ol_txrx_print(level, "TXRX: " fmt, ## __VA_ARGS__)

#ifdef TXRX_PRINT_VERBOSE_ENABLE

#define ol_txrx_print_verbose(fmt, ...) { \
	if (TXRX_PRINT_LEVEL_INFO3 <= g_txrx_print_level) \
		cdf_print(fmt, ## __VA_ARGS__); }
/* High-frequency (INFO3) prints; compiled in only for verbose builds */
#define TXRX_PRINT_VERBOSE(fmt, ...) \
	ol_txrx_print_verbose("TXRX: " fmt, ## __VA_ARGS__)
#else
#define TXRX_PRINT_VERBOSE(fmt, ...)
#endif /* TXRX_PRINT_VERBOSE_ENABLE */

/* define PN check failure message print rate
   as 1 second */
#define TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS 1000

#else
#define TXRX_PRINT(level, fmt, ...)
#define TXRX_PRINT_VERBOSE(fmt, ...)
#endif /* TXRX_PRINT_ENABLE */
136
/*--- tx credit debug printouts ---*/

#ifndef DEBUG_CREDIT
#define DEBUG_CREDIT 0
#endif

#if DEBUG_CREDIT
#define TX_CREDIT_DEBUG_PRINT(fmt, ...) cdf_print(fmt, ## __VA_ARGS__)
#else
#define TX_CREDIT_DEBUG_PRINT(fmt, ...)
#endif

/*--- tx scheduler debug printouts ---*/

#ifdef HOST_TX_SCHED_DEBUG
#define TX_SCHED_DEBUG_PRINT(fmt, ...) cdf_print(fmt, ## __VA_ARGS__)
#else
#define TX_SCHED_DEBUG_PRINT(fmt, ...)
#endif
/* Unconditional variant for scheduler messages that must always print */
#define TX_SCHED_DEBUG_PRINT_ALWAYS(fmt, ...) cdf_print(fmt, ## __VA_ARGS__)
157
/*
 * Append a netbuf to a head/tail-tracked singly linked list.
 * Note: does not NULL-terminate the new tail; callers track the list
 * end via the tail pointer (or terminate it themselves).
 */
#define OL_TXRX_LIST_APPEND(head, tail, elem) \
	do {                                            \
		if (!(head)) {                              \
			(head) = (elem);                        \
		} else {                                    \
			cdf_nbuf_set_next((tail), (elem));      \
		}                                           \
		(tail) = (elem);                                \
	} while (0)
167
168static inline void
169ol_rx_mpdu_list_next(struct ol_txrx_pdev_t *pdev,
170 void *mpdu_list,
171 cdf_nbuf_t *mpdu_tail, cdf_nbuf_t *next_mpdu)
172{
173 htt_pdev_handle htt_pdev = pdev->htt_pdev;
174 cdf_nbuf_t msdu;
175
176 /*
177 * For now, we use a simply flat list of MSDUs.
178 * So, traverse the list until we reach the last MSDU within the MPDU.
179 */
180 TXRX_ASSERT2(mpdu_list);
181 msdu = mpdu_list;
182 while (!htt_rx_msdu_desc_completes_mpdu
183 (htt_pdev, htt_rx_msdu_desc_retrieve(htt_pdev, msdu))) {
184 msdu = cdf_nbuf_next(msdu);
185 TXRX_ASSERT2(msdu);
186 }
187 /* msdu now points to the last MSDU within the first MPDU */
188 *mpdu_tail = msdu;
189 *next_mpdu = cdf_nbuf_next(msdu);
190}
191
/*--- txrx stats macros ---*/

/* unconditional defs */
/* Increment a counter stat by one */
#define TXRX_STATS_INCR(pdev, field) TXRX_STATS_ADD(pdev, field, 1)

/* default conditional defs (may be undefed below) */

/* Zero the entire pdev stats structure */
#define TXRX_STATS_INIT(_pdev) \
	cdf_mem_set(&((_pdev)->stats), sizeof((_pdev)->stats), 0x0)
/* Add a delta to an individual stats field (not atomic) */
#define TXRX_STATS_ADD(_pdev, _field, _delta) { \
	_pdev->stats._field += _delta; }
/* Account one MSDU: bump both the packet and byte counters of a field */
#define TXRX_STATS_MSDU_INCR(pdev, field, netbuf) \
	do { \
		TXRX_STATS_INCR((pdev), pub.field.pkts); \
		TXRX_STATS_ADD((pdev), pub.field.bytes, cdf_nbuf_len(netbuf)); \
	} while (0)
208
/* conditional defs based on verbosity level */


/* Account every MSDU in a (next-linked) netbuf list against a stats field */
#define TXRX_STATS_MSDU_LIST_INCR(pdev, field, netbuf_list) \
	do { \
		cdf_nbuf_t tmp_list = netbuf_list; \
		while (tmp_list) { \
			TXRX_STATS_MSDU_INCR(pdev, field, tmp_list); \
			tmp_list = cdf_nbuf_next(tmp_list); \
		} \
	} while (0)

/* Map an HTT tx completion status onto the matching delivered/dropped stat */
#define TXRX_STATS_MSDU_INCR_TX_STATUS(status, pdev, netbuf) do { \
		if (status == htt_tx_status_ok) \
			TXRX_STATS_MSDU_INCR(pdev, tx.delivered, netbuf); \
		else if (status == htt_tx_status_discard) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.target_discard, \
					     netbuf); \
		else if (status == htt_tx_status_no_ack) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.no_ack, netbuf); \
		else if (status == htt_tx_status_download_fail) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.download_fail, \
					     netbuf); \
		else \
			/* NO-OP */; \
	} while (0)
235
/*
 * Update the tx completion histogram with the number of packets
 * (_p_cntrs) completed in one batch.  Buckets: 1, 2-10, 11-20, ...,
 * 51-60, 61+.
 * Fix: the 2-10 bucket previously tested "_p_cntrs > 2", so a batch of
 * exactly 2 packets fell through every range and was misfiled in the
 * 61+ bucket.
 */
#define TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs) \
	do { \
		if (_p_cntrs == 1) { \
			TXRX_STATS_ADD(_pdev, pub.tx.comp_histogram.pkts_1, 1);\
		} else if (_p_cntrs > 1 && _p_cntrs <= 10) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_2_10, 1); \
		} else if (_p_cntrs > 10 && _p_cntrs <= 20) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_11_20, 1); \
		} else if (_p_cntrs > 20 && _p_cntrs <= 30) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_21_30, 1); \
		} else if (_p_cntrs > 30 && _p_cntrs <= 40) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_31_40, 1); \
		} else if (_p_cntrs > 40 && _p_cntrs <= 50) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_41_50, 1); \
		} else if (_p_cntrs > 50 && _p_cntrs <= 60) { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_51_60, 1); \
		} else { \
			TXRX_STATS_ADD(_pdev, \
				 pub.tx.comp_histogram.pkts_61_plus, 1); \
		} \
	} while (0)
263
/*
 * Update delivered/dropped packet and byte counters for a batch of tx
 * completions, then fold the batch size into the completion histogram.
 * Fix: the switch previously read "status" instead of the macro
 * parameter "_status", so it only compiled when the call site happened
 * to have a variable literally named "status" in scope.
 */
#define TXRX_STATS_UPDATE_TX_STATS(_pdev, _status, _p_cntrs, _b_cntrs) \
	do { \
		switch (_status) { \
		case htt_tx_status_ok: \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.delivered.pkts, _p_cntrs); \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.delivered.bytes, _b_cntrs); \
			break; \
		case htt_tx_status_discard: \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.dropped.target_discard.pkts, _p_cntrs);\
			TXRX_STATS_ADD(_pdev, \
				pub.tx.dropped.target_discard.bytes, _b_cntrs);\
			break; \
		case htt_tx_status_no_ack: \
			TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.pkts, \
				       _p_cntrs); \
			TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.bytes, \
				       _b_cntrs); \
			break; \
		case htt_tx_status_download_fail: \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.dropped.download_fail.pkts, _p_cntrs); \
			TXRX_STATS_ADD(_pdev, \
				pub.tx.dropped.download_fail.bytes, _b_cntrs);\
			break; \
		default: \
			break; \
		} \
		TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs); \
	} while (0)
296
297
/*--- txrx sequence number trace macros ---*/

/* Encode an error status into the sequence-number trace value space */
#define TXRX_SEQ_NUM_ERR(_status) (0xffff - _status)

#if defined(ENABLE_RX_REORDER_TRACE)

A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev);
void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev);
void ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
			     uint8_t tid,
			     uint16_t reorder_idx,
			     uint16_t seq_num, int num_mpdus);

#define OL_RX_REORDER_TRACE_ATTACH ol_rx_reorder_trace_attach
#define OL_RX_REORDER_TRACE_DETACH ol_rx_reorder_trace_detach
#define OL_RX_REORDER_TRACE_ADD ol_rx_reorder_trace_add

#else
/* Reorder tracing disabled: attach trivially succeeds, the rest no-op */
#define OL_RX_REORDER_TRACE_ATTACH(_pdev) A_OK
#define OL_RX_REORDER_TRACE_DETACH(_pdev)
#define OL_RX_REORDER_TRACE_ADD(pdev, tid, reorder_idx, seq_num, num_mpdus)

#endif /* ENABLE_RX_REORDER_TRACE */
322
/*--- txrx packet number trace macros ---*/

#if defined(ENABLE_RX_PN_TRACE)

A_STATUS ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev);
void ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev);
void ol_rx_pn_trace_add(struct ol_txrx_pdev_t *pdev,
			struct ol_txrx_peer_t *peer,
			uint16_t tid, void *rx_desc);

#define OL_RX_PN_TRACE_ATTACH ol_rx_pn_trace_attach
#define OL_RX_PN_TRACE_DETACH ol_rx_pn_trace_detach
#define OL_RX_PN_TRACE_ADD ol_rx_pn_trace_add

#else
/* PN tracing disabled: attach trivially succeeds, the rest no-op */
#define OL_RX_PN_TRACE_ATTACH(_pdev) A_OK
#define OL_RX_PN_TRACE_DETACH(_pdev)
#define OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc)

#endif /* ENABLE_RX_PN_TRACE */
344
345static inline int ol_txrx_ieee80211_hdrsize(const void *data)
346{
347 const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
348 int size = sizeof(struct ieee80211_frame);
349
350 /* NB: we don't handle control frames */
351 TXRX_ASSERT1((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
352 IEEE80211_FC0_TYPE_CTL);
353 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
354 IEEE80211_FC1_DIR_DSTODS)
355 size += IEEE80211_ADDR_LEN;
356 if (IEEE80211_QOS_HAS_SEQ(wh)) {
357 size += sizeof(uint16_t);
358 /* Qos frame with Order bit set indicates an HTC frame */
359 if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
360 size += sizeof(struct ieee80211_htc);
361 }
362 return size;
363}
364
/*--- frame display utility ---*/

/* Bitmask options controlling what ol_txrx_frms_dump prints */
enum ol_txrx_frm_dump_options {
	ol_txrx_frm_dump_contents = 0x1,  /* hex-dump the frame bytes */
	ol_txrx_frm_dump_tcp_seq = 0x2,   /* decode and print the TCP seq num */
};
371
372#ifdef TXRX_DEBUG_DATA
373static inline void
374ol_txrx_frms_dump(const char *name,
375 struct ol_txrx_pdev_t *pdev,
376 cdf_nbuf_t frm,
377 enum ol_txrx_frm_dump_options display_options, int max_len)
378{
379#define TXRX_FRM_DUMP_MAX_LEN 128
380 uint8_t local_buf[TXRX_FRM_DUMP_MAX_LEN] = { 0 };
381 uint8_t *p;
382
383 if (name) {
384 CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO, "%s\n",
385 name);
386 }
387 while (frm) {
388 p = cdf_nbuf_data(frm);
389 if (display_options & ol_txrx_frm_dump_tcp_seq) {
390 int tcp_offset;
391 int l2_hdr_size;
392 uint16_t ethtype;
393 uint8_t ip_prot;
394
395 if (pdev->frame_format == wlan_frm_fmt_802_3) {
396 struct ethernet_hdr_t *enet_hdr =
397 (struct ethernet_hdr_t *)p;
398 l2_hdr_size = ETHERNET_HDR_LEN;
399
400 /*
401 * LLC/SNAP present?
402 */
403 ethtype = (enet_hdr->ethertype[0] << 8) |
404 enet_hdr->ethertype[1];
405 if (!IS_ETHERTYPE(ethertype)) {
406 /* 802.3 format */
407 struct llc_snap_hdr_t *llc_hdr;
408
409 llc_hdr = (struct llc_snap_hdr_t *)
410 (p + l2_hdr_size);
411 l2_hdr_size += LLC_SNAP_HDR_LEN;
412 ethtype = (llc_hdr->ethertype[0] << 8) |
413 llc_hdr->ethertype[1];
414 }
415 } else {
416 struct llc_snap_hdr_t *llc_hdr;
417 /* (generic?) 802.11 */
418 l2_hdr_size = sizeof(struct ieee80211_frame);
419 llc_hdr = (struct llc_snap_hdr_t *)
420 (p + l2_hdr_size);
421 l2_hdr_size += LLC_SNAP_HDR_LEN;
422 ethtype = (llc_hdr->ethertype[0] << 8) |
423 llc_hdr->ethertype[1];
424 }
425 if (ethtype == ETHERTYPE_IPV4) {
426 struct ipv4_hdr_t *ipv4_hdr;
427 ipv4_hdr =
428 (struct ipv4_hdr_t *)(p + l2_hdr_size);
429 ip_prot = ipv4_hdr->protocol;
430 tcp_offset = l2_hdr_size + IPV4_HDR_LEN;
431 } else if (ethtype == ETHERTYPE_IPV6) {
432 struct ipv6_hdr_t *ipv6_hdr;
433 ipv6_hdr =
434 (struct ipv6_hdr_t *)(p + l2_hdr_size);
435 ip_prot = ipv6_hdr->next_hdr;
436 tcp_offset = l2_hdr_size + IPV6_HDR_LEN;
437 } else {
438 CDF_TRACE(CDF_MODULE_ID_TXRX,
439 CDF_TRACE_LEVEL_INFO,
440 "frame %p non-IP ethertype (%x)\n",
441 frm, ethtype);
442 goto NOT_IP_TCP;
443 }
444 if (ip_prot == IP_PROTOCOL_TCP) {
445#if NEVERDEFINED
446 struct tcp_hdr_t *tcp_hdr;
447 uint32_t tcp_seq_num;
448 tcp_hdr = (struct tcp_hdr_t *)(p + tcp_offset);
449 tcp_seq_num =
450 (tcp_hdr->seq_num[0] << 24) |
451 (tcp_hdr->seq_num[1] << 16) |
452 (tcp_hdr->seq_num[1] << 8) |
453 (tcp_hdr->seq_num[1] << 0);
454 CDF_TRACE(CDF_MODULE_ID_TXRX,
455 CDF_TRACE_LEVEL_INFO,
456 "frame %p: TCP seq num = %d\n", frm,
457 tcp_seq_num);
458#else
459 CDF_TRACE(CDF_MODULE_ID_TXRX,
460 CDF_TRACE_LEVEL_INFO,
461 "frame %p: TCP seq num = %d\n", frm,
462 ((*(p + tcp_offset + 4)) << 24) |
463 ((*(p + tcp_offset + 5)) << 16) |
464 ((*(p + tcp_offset + 6)) << 8) |
465 (*(p + tcp_offset + 7)));
466#endif
467 } else {
468 CDF_TRACE(CDF_MODULE_ID_TXRX,
469 CDF_TRACE_LEVEL_INFO,
470 "frame %p non-TCP IP protocol (%x)\n",
471 frm, ip_prot);
472 }
473 }
474NOT_IP_TCP:
475 if (display_options & ol_txrx_frm_dump_contents) {
476 int i, frag_num, len_lim;
477 len_lim = max_len;
478 if (len_lim > cdf_nbuf_len(frm))
479 len_lim = cdf_nbuf_len(frm);
480 if (len_lim > TXRX_FRM_DUMP_MAX_LEN)
481 len_lim = TXRX_FRM_DUMP_MAX_LEN;
482
483 /*
484 * Gather frame contents from netbuf fragments
485 * into a contiguous buffer.
486 */
487 frag_num = 0;
488 i = 0;
489 while (i < len_lim) {
490 int frag_bytes;
491 frag_bytes =
492 cdf_nbuf_get_frag_len(frm, frag_num);
493 if (frag_bytes > len_lim - i)
494 frag_bytes = len_lim - i;
495 if (frag_bytes > 0) {
496 p = cdf_nbuf_get_frag_vaddr(frm,
497 frag_num);
498 cdf_mem_copy(&local_buf[i], p,
499 frag_bytes);
500 }
501 frag_num++;
502 i += frag_bytes;
503 }
504
505 CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
506 "frame %p data (%p), hex dump of bytes 0-%d of %d:\n",
507 frm, p, len_lim - 1, (int)cdf_nbuf_len(frm));
508 p = local_buf;
509 while (len_lim > 16) {
510 CDF_TRACE(CDF_MODULE_ID_TXRX,
511 CDF_TRACE_LEVEL_INFO,
512 " " /* indent */
513 "%02x %02x %02x %02x %02x %02x %02x %02x "
514 "%02x %02x %02x %02x %02x %02x %02x %02x\n",
515 *(p + 0), *(p + 1), *(p + 2),
516 *(p + 3), *(p + 4), *(p + 5),
517 *(p + 6), *(p + 7), *(p + 8),
518 *(p + 9), *(p + 10), *(p + 11),
519 *(p + 12), *(p + 13), *(p + 14),
520 *(p + 15));
521 p += 16;
522 len_lim -= 16;
523 }
524 CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
525 " " /* indent */);
526 while (len_lim > 0) {
527 CDF_TRACE(CDF_MODULE_ID_TXRX,
528 CDF_TRACE_LEVEL_INFO, "%02x ", *p);
529 p++;
530 len_lim--;
531 }
532 CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
533 "\n");
534 }
535 frm = cdf_nbuf_next(frm);
536 }
537}
#else
/* Data-path frame dumping disabled: compile out to a no-op */
#define ol_txrx_frms_dump(name, pdev, frms, display_options, max_len)
#endif /* TXRX_DEBUG_DATA */
541
542#ifdef SUPPORT_HOST_STATISTICS
543
/*
 * Forward an rx error event (with security type and mcast flag) to the
 * control-plane statistics handler.
 */
#define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast) \
	ol_rx_err_statistics(pdev->ctrl_pdev, vdev->vdev_id, err_type,	   \
			     sec_type, is_mcast);

/*
 * Determine whether the rx MSDU was multicast and which peer security
 * type applies, then record the rx error with those attributes.
 */
#define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type)	\
	do {								\
		int is_mcast;						\
		enum htt_sec_type sec_type;				\
		is_mcast = htt_rx_msdu_is_wlan_mcast(			\
			pdev->htt_pdev, rx_desc);			\
		sec_type = peer->security[is_mcast			\
					  ? txrx_sec_mcast		\
					  : txrx_sec_ucast].sec_type;	\
		OL_RX_ERR_STATISTICS(pdev, vdev, err_type,		\
				     pdev->sec_types[sec_type],		\
				     is_mcast);				\
	} while (false)
561
/*
 * Record an rx error for a frame whose peer could not be identified.
 * The wifi header is only extracted for native-wifi format frames;
 * otherwise a NULL header is reported.
 */
#define OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu)			\
	do {								\
		struct ieee80211_frame *wh = NULL;			\
		/*FIX THIS : */						\
		/* Here htt_rx_mpdu_wifi_hdr_retrieve should be used. */ \
		/*But at present it seems it does not work.*/		\
		/*wh = (struct ieee80211_frame *) */			\
		/*htt_rx_mpdu_wifi_hdr_retrieve(pdev->htt_pdev, rx_desc);*/ \
		/* this only apply to LL device.*/			\
		if (ol_cfg_frame_type(pdev->ctrl_pdev) ==		\
		    wlan_frm_fmt_native_wifi) {				\
			/* For windows, it is always native wifi header .*/ \
			wh = (struct ieee80211_frame *)cdf_nbuf_data(rx_msdu); \
		}							\
		ol_rx_err_inv_peer_statistics(pdev->ctrl_pdev,		\
					      wh, OL_RX_ERR_UNKNOWN_PEER); \
	} while (false)
579
/*
 * Map an HTT rx status code to an ol_rx_err_type and record it, using
 * the per-peer path when the vdev/peer are known and the invalid-peer
 * path otherwise.
 * Fix: the per-peer call previously passed "rx_mpdu_desc", a name that
 * is not a parameter of this macro and only resolved when the call
 * site happened to declare a variable with that name; it now passes
 * the rx_desc parameter.
 */
#define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status) \
	do {								\
		enum ol_rx_err_type err_type = OL_RX_ERR_NONE;		\
		if (rx_status == htt_rx_status_decrypt_err)		\
			err_type = OL_RX_ERR_DECRYPT;			\
		else if (rx_status == htt_rx_status_tkip_mic_err)	\
			err_type = OL_RX_ERR_TKIP_MIC;			\
		else if (rx_status == htt_rx_status_mpdu_length_err)	\
			err_type = OL_RX_ERR_MPDU_LENGTH;		\
		else if (rx_status == htt_rx_status_mpdu_encrypt_required_err) \
			err_type = OL_RX_ERR_ENCRYPT_REQUIRED;		\
		else if (rx_status == htt_rx_status_err_dup)		\
			err_type = OL_RX_ERR_DUP;			\
		else if (rx_status == htt_rx_status_err_fcs)		\
			err_type = OL_RX_ERR_FCS;			\
		else							\
			err_type = OL_RX_ERR_UNKNOWN;			\
									\
		if (vdev != NULL && peer != NULL) {			\
			OL_RX_ERR_STATISTICS_1(pdev, vdev, peer,	\
					       rx_desc, err_type);	\
		} else {						\
			OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu);	\
		}							\
	} while (false)
#else
/* Host rx-error statistics disabled: compile out to no-ops */
#define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast)
#define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type)
#define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status)
#endif /* SUPPORT_HOST_STATISTICS */
610
#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
/*
 * Account one MSDU against a peer's tx or rx stats under the pdev
 * peer-stat mutex; 'type' selects the ucast/mcast/bcast bucket.
 */
#define OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, type, msdu) \
	do { \
		cdf_spin_lock_bh(&peer->vdev->pdev->peer_stat_mutex); \
		peer->stats.tx_or_rx.frms.type += 1; \
		peer->stats.tx_or_rx.bytes.type += cdf_nbuf_len(msdu); \
		cdf_spin_unlock_bh(&peer->vdev->pdev->peer_stat_mutex); \
	} while (0)
/*
 * Classify the MSDU's destination address (ucast/mcast/bcast) from the
 * 802.3 or 802.11 header, then update the matching per-peer counters.
 * For 802.11, the DA is addr1 in AP mode and addr3 otherwise.
 */
#define OL_TXRX_PEER_STATS_UPDATE(peer, tx_or_rx, msdu) \
	do { \
		struct ol_txrx_vdev_t *vdev = peer->vdev; \
		struct ol_txrx_pdev_t *pdev = vdev->pdev; \
		uint8_t *dest_addr; \
		if (pdev->frame_format == wlan_frm_fmt_802_3) { \
			dest_addr = cdf_nbuf_data(msdu); \
		} else { /* 802.11 format */ \
			struct ieee80211_frame *frm; \
			frm = (struct ieee80211_frame *) cdf_nbuf_data(msdu); \
			if (vdev->opmode == wlan_op_mode_ap) { \
				dest_addr = (uint8_t *) &(frm->i_addr1[0]); \
			} else { \
				dest_addr = (uint8_t *) &(frm->i_addr3[0]); \
			} \
		} \
		if (cdf_unlikely(IEEE80211_IS_BROADCAST(dest_addr))) { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       bcast, msdu); \
		} else if (cdf_unlikely(IEEE80211_IS_MULTICAST(dest_addr))) { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       mcast, msdu); \
		} else { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       ucast, msdu); \
		} \
	} while (0)
#define OL_TX_PEER_STATS_UPDATE(peer, msdu) \
	OL_TXRX_PEER_STATS_UPDATE(peer, tx, msdu)
#define OL_RX_PEER_STATS_UPDATE(peer, msdu) \
	OL_TXRX_PEER_STATS_UPDATE(peer, rx, msdu)
/* Init/teardown of the spinlock protecting all per-peer stats updates */
#define OL_TXRX_PEER_STATS_MUTEX_INIT(pdev) \
	cdf_spinlock_init(&pdev->peer_stat_mutex)
#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev) \
	cdf_spinlock_destroy(&pdev->peer_stat_mutex)
654#else
/*
 * Peer stats disabled: all updates and mutex ops collapse to no-ops.
 * Consistency fix: the mutex macro parameters were named "peer" even
 * though call sites (and the enabled variants above) pass a pdev;
 * renamed to "pdev" to match.
 */
#define OL_TX_PEER_STATS_UPDATE(peer, msdu)     /* no-op */
#define OL_RX_PEER_STATS_UPDATE(peer, msdu)     /* no-op */
#define OL_TXRX_PEER_STATS_MUTEX_INIT(pdev)     /* no-op */
#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev)  /* no-op */
659#endif
660
#ifndef DEBUG_HTT_CREDIT
#define DEBUG_HTT_CREDIT 0
#endif

#if defined(FEATURE_TSO_DEBUG)
/* Clear the segment bookkeeping of the TSO MSDU slot currently in use */
#define TXRX_STATS_TSO_RESET_MSDU(pdev) \
	do { \
		int idx = TXRX_STATS_TSO_MSDU_IDX(pdev);\
		pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg = 0; \
		pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].tso_seg_idx = 0; \
	} while (0)

/* Index of the TSO MSDU history slot currently being recorded */
#define TXRX_STATS_TSO_MSDU_IDX(pdev) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx

/* TSO MSDU history record at a given slot */
#define TXRX_STATS_TSO_MSDU(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx]

/* Number of segments recorded for the MSDU at a given slot */
#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg

/* MSDU history record currently being recorded */
#define TXRX_STATS_TSO_CURR_MSDU(pdev) \
	TXRX_STATS_TSO_MSDU(pdev, TXRX_STATS_TSO_MSDU_IDX(pdev))

/* Advance to the next MSDU slot (wraps via NUM_MAX_TSO_MSDUS_MASK) */
#define TXRX_STATS_TSO_INC_MSDU_IDX(pdev) \
	do { \
		TXRX_STATS_TSO_MSDU_IDX(pdev)++; \
		TXRX_STATS_TSO_MSDU_IDX(pdev) &= NUM_MAX_TSO_MSDUS_MASK; \
	} while (0)

/* Segment index within the current MSDU record */
#define TXRX_STATS_TSO_SEG_IDX(pdev) \
	TXRX_STATS_TSO_CURR_MSDU(pdev).tso_seg_idx

/* Count one more segment against the current MSDU record */
#define TXRX_STATS_TSO_INC_SEG(pdev) \
	TXRX_STATS_TSO_CURR_MSDU(pdev).num_seg++

/* Reset the segment count of the current MSDU record */
#define TXRX_STATS_TSO_RST_SEG(pdev) \
	TXRX_STATS_TSO_CURR_MSDU(pdev).num_seg = 0

/* Reset the segment index of the current MSDU record */
#define TXRX_STATS_TSO_RST_SEG_IDX(pdev) \
	TXRX_STATS_TSO_CURR_MSDU(pdev).tso_seg_idx = 0

/* Segment record at (msdu_idx, seg_idx) */
#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) \
	TXRX_STATS_TSO_MSDU(pdev, msdu_idx).tso_segs[seg_idx]

/* Segment record currently being recorded */
#define TXRX_STATS_TSO_CURR_SEG(pdev) \
	TXRX_STATS_TSO_SEG(pdev, TXRX_STATS_TSO_MSDU_IDX(pdev), \
			   TXRX_STATS_TSO_SEG_IDX(pdev)) \

/* Advance to the next segment slot (wraps via NUM_MAX_TSO_SEGS_MASK) */
#define TXRX_STATS_TSO_INC_SEG_IDX(pdev) \
	do { \
		TXRX_STATS_TSO_SEG_IDX(pdev)++; \
		TXRX_STATS_TSO_SEG_IDX(pdev) &= NUM_MAX_TSO_SEGS_MASK; \
	} while (0)

/* Store a segment descriptor into the current segment slot */
#define TXRX_STATS_TSO_SEG_UPDATE(pdev, tso_seg) \
	(TXRX_STATS_TSO_CURR_SEG(pdev) = tso_seg)

#else
/* TSO debug stats disabled: all macros compile out to no-ops */
#define TXRX_STATS_TSO_RESET_MSDU(pdev) /* no-op */
#define TXRX_STATS_TSO_MSDU_IDX(pdev) /* no-op */
#define TXRX_STATS_TSO_MSDU(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) /* no-op */
#define TXRX_STATS_TSO_CURR_MSDU(pdev) /* no-op */
#define TXRX_STATS_TSO_INC_MSDU_IDX(pdev) /* no-op */
#define TXRX_STATS_TSO_SEG_IDX(pdev) /* no-op */
#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) /* no-op */
#define TXRX_STATS_TSO_CURR_SEG(pdev) /* no-op */
#define TXRX_STATS_TSO_INC_SEG_IDX(pdev) /* no-op */
#define TXRX_STATS_TSO_SEG_UPDATE(pdev, tso_seg) /* no-op */
#define TXRX_STATS_TSO_INC_SEG(pdev) /* no-op */
#define TXRX_STATS_TSO_RST_SEG(pdev) /* no-op */
#define TXRX_STATS_TSO_RST_SEG_IDX(pdev) /* no-op */

#endif /* FEATURE_TSO_DEBUG */
736
737#endif /* _OL_TXRX_INTERNAL__H_ */