Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
Nirav Shah | 99923a8 | 2018-06-23 14:35:49 +0530 | [diff] [blame^] | 2 | * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3 | * |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 16 | * PERFORMANCE OF THIS SOFTWARE. |
| 17 | */ |
| 18 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 19 | #ifndef _OL_TXRX_INTERNAL__H_ |
| 20 | #define _OL_TXRX_INTERNAL__H_ |
| 21 | |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 22 | #include <qdf_util.h> /* qdf_assert */ |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 23 | #include <qdf_nbuf.h> /* qdf_nbuf_t */ |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 24 | #include <qdf_mem.h> /* qdf_mem_set */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 25 | #include <cds_ieee80211_common.h> /* ieee80211_frame */ |
| 26 | #include <ol_htt_rx_api.h> /* htt_rx_msdu_desc_completes_mpdu, etc. */ |
| 27 | |
| 28 | #include <ol_txrx_types.h> |
| 29 | |
| 30 | #include <ol_txrx_dbg.h> |
| 31 | #include <enet.h> /* ETHERNET_HDR_LEN, etc. */ |
| 32 | #include <ipv4.h> /* IPV4_HDR_LEN, etc. */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 33 | #include <ip_prot.h> /* IP_PROTOCOL_TCP, etc. */ |
| 34 | |
| 35 | #ifdef ATH_11AC_TXCOMPACT |
| 36 | #define OL_TX_DESC_NO_REFS(tx_desc) 1 |
| 37 | #define OL_TX_DESC_REF_INIT(tx_desc) /* no-op */ |
| 38 | #define OL_TX_DESC_REF_INC(tx_desc) /* no-op */ |
| 39 | #else |
| 40 | #define OL_TX_DESC_NO_REFS(tx_desc) \ |
Anurag Chouhan | 8e0ccd3 | 2016-02-19 15:30:20 +0530 | [diff] [blame] | 41 | qdf_atomic_dec_and_test(&tx_desc->ref_cnt) |
| 42 | #define OL_TX_DESC_REF_INIT(tx_desc) qdf_atomic_init(&tx_desc->ref_cnt) |
| 43 | #define OL_TX_DESC_REF_INC(tx_desc) qdf_atomic_inc(&tx_desc->ref_cnt) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 44 | #endif |
| 45 | |
| 46 | #ifndef TXRX_ASSERT_LEVEL |
| 47 | #define TXRX_ASSERT_LEVEL 3 |
| 48 | #endif |
| 49 | |
| 50 | #ifdef __KLOCWORK__ |
| 51 | #define TXRX_ASSERT1(x) do { if (!(x)) abort(); } while (0) |
| 52 | #define TXRX_ASSERT2(x) do { if (!(x)) abort(); } while (0) |
| 53 | #else /* #ifdef __KLOCWORK__ */ |
| 54 | |
| 55 | #if TXRX_ASSERT_LEVEL > 0 |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 56 | #define TXRX_ASSERT1(condition) qdf_assert((condition)) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 57 | #else |
| 58 | #define TXRX_ASSERT1(condition) |
| 59 | #endif |
| 60 | |
| 61 | #if TXRX_ASSERT_LEVEL > 1 |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 62 | #define TXRX_ASSERT2(condition) qdf_assert((condition)) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 63 | #else |
| 64 | #define TXRX_ASSERT2(condition) |
| 65 | #endif |
| 66 | #endif /* #ifdef __KLOCWORK__ */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 67 | |
| 68 | #ifdef TXRX_PRINT_ENABLE |
| 69 | |
| 70 | #include <stdarg.h> /* va_list */ |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 71 | #include <qdf_types.h> /* qdf_vprint */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 72 | |
Nirav Shah | 99923a8 | 2018-06-23 14:35:49 +0530 | [diff] [blame^] | 73 | #define ol_txrx_alert(params...) \ |
| 74 | QDF_TRACE_FATAL(QDF_MODULE_ID_TXRX, params) |
| 75 | #define ol_txrx_err(params...) \ |
| 76 | QDF_TRACE_ERROR(QDF_MODULE_ID_TXRX, params) |
| 77 | #define ol_txrx_warn(params...) \ |
| 78 | QDF_TRACE_WARN(QDF_MODULE_ID_TXRX, params) |
| 79 | #define ol_txrx_info(params...) \ |
| 80 | QDF_TRACE_INFO(QDF_MODULE_ID_TXRX, params) |
| 81 | #define ol_txrx_info_high(params...) \ |
| 82 | QDF_TRACE_INFO(QDF_MODULE_ID_TXRX, params) |
| 83 | #define ol_txrx_dbg(params...) \ |
| 84 | QDF_TRACE_DEBUG(QDF_MODULE_ID_TXRX, params) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 85 | |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 86 | /* |
| 87 | * define PN check failure message print rate |
| 88 | * as 1 second |
| 89 | */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 90 | #define TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS 1000 |
| 91 | |
| 92 | #else |
Poddar, Siddarth | 1452179 | 2017-03-14 21:19:42 +0530 | [diff] [blame] | 93 | #define ol_txrx_alert(format, args...) |
| 94 | #define ol_txrx_err(format, args...) |
| 95 | #define ol_txrx_warn(format, args...) |
| 96 | #define ol_txrx_info(format, args...) |
| 97 | #define ol_txrx_info_high(format, args...) |
| 98 | #define ol_txrx_dbg(format, args...) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 99 | #endif /* TXRX_PRINT_ENABLE */ |
| 100 | |
| 101 | /*--- tx credit debug printouts ---*/ |
| 102 | |
| 103 | #ifndef DEBUG_CREDIT |
| 104 | #define DEBUG_CREDIT 0 |
| 105 | #endif |
| 106 | |
| 107 | #if DEBUG_CREDIT |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 108 | #define TX_CREDIT_DEBUG_PRINT(fmt, ...) qdf_print(fmt, ## __VA_ARGS__) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 109 | #else |
| 110 | #define TX_CREDIT_DEBUG_PRINT(fmt, ...) |
| 111 | #endif |
| 112 | |
| 113 | /*--- tx scheduler debug printouts ---*/ |
| 114 | |
| 115 | #ifdef HOST_TX_SCHED_DEBUG |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 116 | #define TX_SCHED_DEBUG_PRINT(fmt, ...) qdf_print(fmt, ## __VA_ARGS__) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 117 | #else |
| 118 | #define TX_SCHED_DEBUG_PRINT(fmt, ...) |
| 119 | #endif |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 120 | #define TX_SCHED_DEBUG_PRINT_ALWAYS(fmt, ...) qdf_print(fmt, ## __VA_ARGS__) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 121 | |
| 122 | #define OL_TXRX_LIST_APPEND(head, tail, elem) \ |
| 123 | do { \ |
| 124 | if (!(head)) { \ |
| 125 | (head) = (elem); \ |
| 126 | } else { \ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 127 | qdf_nbuf_set_next((tail), (elem)); \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 128 | } \ |
| 129 | (tail) = (elem); \ |
| 130 | } while (0) |
| 131 | |
| 132 | static inline void |
| 133 | ol_rx_mpdu_list_next(struct ol_txrx_pdev_t *pdev, |
| 134 | void *mpdu_list, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 135 | qdf_nbuf_t *mpdu_tail, qdf_nbuf_t *next_mpdu) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 136 | { |
| 137 | htt_pdev_handle htt_pdev = pdev->htt_pdev; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 138 | qdf_nbuf_t msdu; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 139 | |
| 140 | /* |
| 141 | * For now, we use a simply flat list of MSDUs. |
| 142 | * So, traverse the list until we reach the last MSDU within the MPDU. |
| 143 | */ |
| 144 | TXRX_ASSERT2(mpdu_list); |
| 145 | msdu = mpdu_list; |
| 146 | while (!htt_rx_msdu_desc_completes_mpdu |
| 147 | (htt_pdev, htt_rx_msdu_desc_retrieve(htt_pdev, msdu))) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 148 | msdu = qdf_nbuf_next(msdu); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 149 | TXRX_ASSERT2(msdu); |
| 150 | } |
| 151 | /* msdu now points to the last MSDU within the first MPDU */ |
| 152 | *mpdu_tail = msdu; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 153 | *next_mpdu = qdf_nbuf_next(msdu); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 154 | } |
| 155 | |
| 156 | /*--- txrx stats macros ---*/ |
| 157 | |
| 158 | /* unconditional defs */ |
| 159 | #define TXRX_STATS_INCR(pdev, field) TXRX_STATS_ADD(pdev, field, 1) |
| 160 | |
| 161 | /* default conditional defs (may be undefed below) */ |
| 162 | |
| 163 | #define TXRX_STATS_INIT(_pdev) \ |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 164 | qdf_mem_set(&((_pdev)->stats), sizeof((_pdev)->stats), 0x0) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 165 | #define TXRX_STATS_ADD(_pdev, _field, _delta) { \ |
| 166 | _pdev->stats._field += _delta; } |
| 167 | #define TXRX_STATS_MSDU_INCR(pdev, field, netbuf) \ |
| 168 | do { \ |
| 169 | TXRX_STATS_INCR((pdev), pub.field.pkts); \ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 170 | TXRX_STATS_ADD((pdev), pub.field.bytes, qdf_nbuf_len(netbuf)); \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 171 | } while (0) |
| 172 | |
| 173 | /* conditional defs based on verbosity level */ |
| 174 | |
| 175 | |
| 176 | #define TXRX_STATS_MSDU_LIST_INCR(pdev, field, netbuf_list) \ |
| 177 | do { \ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 178 | qdf_nbuf_t tmp_list = netbuf_list; \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 179 | while (tmp_list) { \ |
| 180 | TXRX_STATS_MSDU_INCR(pdev, field, tmp_list); \ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 181 | tmp_list = qdf_nbuf_next(tmp_list); \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 182 | } \ |
| 183 | } while (0) |
| 184 | |
| 185 | #define TXRX_STATS_MSDU_INCR_TX_STATUS(status, pdev, netbuf) do { \ |
| 186 | if (status == htt_tx_status_ok) \ |
| 187 | TXRX_STATS_MSDU_INCR(pdev, tx.delivered, netbuf); \ |
| 188 | else if (status == htt_tx_status_discard) \ |
| 189 | TXRX_STATS_MSDU_INCR(pdev, tx.dropped.target_discard, \ |
| 190 | netbuf); \ |
| 191 | else if (status == htt_tx_status_no_ack) \ |
| 192 | TXRX_STATS_MSDU_INCR(pdev, tx.dropped.no_ack, netbuf); \ |
| 193 | else if (status == htt_tx_status_download_fail) \ |
| 194 | TXRX_STATS_MSDU_INCR(pdev, tx.dropped.download_fail, \ |
| 195 | netbuf); \ |
| 196 | else \ |
| 197 | /* NO-OP */; \ |
| 198 | } while (0) |
| 199 | |
| 200 | #define TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs) \ |
| 201 | do { \ |
| 202 | if (_p_cntrs == 1) { \ |
| 203 | TXRX_STATS_ADD(_pdev, pub.tx.comp_histogram.pkts_1, 1);\ |
| 204 | } else if (_p_cntrs > 2 && _p_cntrs <= 10) { \ |
| 205 | TXRX_STATS_ADD(_pdev, \ |
| 206 | pub.tx.comp_histogram.pkts_2_10, 1); \ |
| 207 | } else if (_p_cntrs > 10 && _p_cntrs <= 20) { \ |
| 208 | TXRX_STATS_ADD(_pdev, \ |
| 209 | pub.tx.comp_histogram.pkts_11_20, 1); \ |
| 210 | } else if (_p_cntrs > 20 && _p_cntrs <= 30) { \ |
| 211 | TXRX_STATS_ADD(_pdev, \ |
| 212 | pub.tx.comp_histogram.pkts_21_30, 1); \ |
| 213 | } else if (_p_cntrs > 30 && _p_cntrs <= 40) { \ |
| 214 | TXRX_STATS_ADD(_pdev, \ |
| 215 | pub.tx.comp_histogram.pkts_31_40, 1); \ |
| 216 | } else if (_p_cntrs > 40 && _p_cntrs <= 50) { \ |
| 217 | TXRX_STATS_ADD(_pdev, \ |
| 218 | pub.tx.comp_histogram.pkts_41_50, 1); \ |
| 219 | } else if (_p_cntrs > 50 && _p_cntrs <= 60) { \ |
| 220 | TXRX_STATS_ADD(_pdev, \ |
| 221 | pub.tx.comp_histogram.pkts_51_60, 1); \ |
| 222 | } else { \ |
| 223 | TXRX_STATS_ADD(_pdev, \ |
| 224 | pub.tx.comp_histogram.pkts_61_plus, 1); \ |
| 225 | } \ |
| 226 | } while (0) |
| 227 | |
| 228 | #define TXRX_STATS_UPDATE_TX_STATS(_pdev, _status, _p_cntrs, _b_cntrs) \ |
| 229 | do { \ |
| 230 | switch (status) { \ |
| 231 | case htt_tx_status_ok: \ |
| 232 | TXRX_STATS_ADD(_pdev, \ |
| 233 | pub.tx.delivered.pkts, _p_cntrs); \ |
| 234 | TXRX_STATS_ADD(_pdev, \ |
| 235 | pub.tx.delivered.bytes, _b_cntrs); \ |
| 236 | break; \ |
| 237 | case htt_tx_status_discard: \ |
| 238 | TXRX_STATS_ADD(_pdev, \ |
| 239 | pub.tx.dropped.target_discard.pkts, _p_cntrs);\ |
| 240 | TXRX_STATS_ADD(_pdev, \ |
| 241 | pub.tx.dropped.target_discard.bytes, _b_cntrs);\ |
| 242 | break; \ |
| 243 | case htt_tx_status_no_ack: \ |
| 244 | TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.pkts, \ |
Jeff Johnson | 560dc56 | 2017-03-17 15:19:31 -0700 | [diff] [blame] | 245 | _p_cntrs); \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 246 | TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.bytes, \ |
| 247 | _b_cntrs); \ |
| 248 | break; \ |
| 249 | case htt_tx_status_download_fail: \ |
| 250 | TXRX_STATS_ADD(_pdev, \ |
| 251 | pub.tx.dropped.download_fail.pkts, _p_cntrs); \ |
| 252 | TXRX_STATS_ADD(_pdev, \ |
| 253 | pub.tx.dropped.download_fail.bytes, _b_cntrs);\ |
| 254 | break; \ |
| 255 | default: \ |
Mohit Khanna | ca4173b | 2017-09-12 21:52:19 -0700 | [diff] [blame] | 256 | TXRX_STATS_ADD(_pdev, \ |
| 257 | pub.tx.dropped.others.pkts, _p_cntrs); \ |
| 258 | TXRX_STATS_ADD(_pdev, \ |
| 259 | pub.tx.dropped.others.bytes, _b_cntrs); \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 260 | break; \ |
| 261 | } \ |
| 262 | TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs); \ |
| 263 | } while (0) |
| 264 | |
| 265 | |
| 266 | /*--- txrx sequence number trace macros ---*/ |
| 267 | |
| 268 | #define TXRX_SEQ_NUM_ERR(_status) (0xffff - _status) |
| 269 | |
| 270 | #if defined(ENABLE_RX_REORDER_TRACE) |
| 271 | |
| 272 | A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev); |
| 273 | void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev); |
| 274 | void ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev, |
| 275 | uint8_t tid, |
| 276 | uint16_t reorder_idx, |
| 277 | uint16_t seq_num, int num_mpdus); |
| 278 | |
| 279 | #define OL_RX_REORDER_TRACE_ATTACH ol_rx_reorder_trace_attach |
| 280 | #define OL_RX_REORDER_TRACE_DETACH ol_rx_reorder_trace_detach |
| 281 | #define OL_RX_REORDER_TRACE_ADD ol_rx_reorder_trace_add |
| 282 | |
| 283 | #else |
| 284 | |
| 285 | #define OL_RX_REORDER_TRACE_ATTACH(_pdev) A_OK |
| 286 | #define OL_RX_REORDER_TRACE_DETACH(_pdev) |
| 287 | #define OL_RX_REORDER_TRACE_ADD(pdev, tid, reorder_idx, seq_num, num_mpdus) |
| 288 | |
| 289 | #endif /* ENABLE_RX_REORDER_TRACE */ |
| 290 | |
| 291 | /*--- txrx packet number trace macros ---*/ |
| 292 | |
| 293 | #if defined(ENABLE_RX_PN_TRACE) |
| 294 | |
| 295 | A_STATUS ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev); |
| 296 | void ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev); |
| 297 | void ol_rx_pn_trace_add(struct ol_txrx_pdev_t *pdev, |
| 298 | struct ol_txrx_peer_t *peer, |
| 299 | uint16_t tid, void *rx_desc); |
| 300 | |
| 301 | #define OL_RX_PN_TRACE_ATTACH ol_rx_pn_trace_attach |
| 302 | #define OL_RX_PN_TRACE_DETACH ol_rx_pn_trace_detach |
| 303 | #define OL_RX_PN_TRACE_ADD ol_rx_pn_trace_add |
| 304 | |
| 305 | #else |
| 306 | |
| 307 | #define OL_RX_PN_TRACE_ATTACH(_pdev) A_OK |
| 308 | #define OL_RX_PN_TRACE_DETACH(_pdev) |
| 309 | #define OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc) |
| 310 | |
| 311 | #endif /* ENABLE_RX_PN_TRACE */ |
| 312 | |
| 313 | static inline int ol_txrx_ieee80211_hdrsize(const void *data) |
| 314 | { |
| 315 | const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data; |
| 316 | int size = sizeof(struct ieee80211_frame); |
| 317 | |
| 318 | /* NB: we don't handle control frames */ |
| 319 | TXRX_ASSERT1((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != |
| 320 | IEEE80211_FC0_TYPE_CTL); |
| 321 | if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == |
| 322 | IEEE80211_FC1_DIR_DSTODS) |
| 323 | size += IEEE80211_ADDR_LEN; |
| 324 | if (IEEE80211_QOS_HAS_SEQ(wh)) { |
| 325 | size += sizeof(uint16_t); |
| 326 | /* Qos frame with Order bit set indicates an HTC frame */ |
| 327 | if (wh->i_fc[1] & IEEE80211_FC1_ORDER) |
| 328 | size += sizeof(struct ieee80211_htc); |
| 329 | } |
| 330 | return size; |
| 331 | } |
| 332 | |
| 333 | /*--- frame display utility ---*/ |
| 334 | |
| 335 | enum ol_txrx_frm_dump_options { |
| 336 | ol_txrx_frm_dump_contents = 0x1, |
| 337 | ol_txrx_frm_dump_tcp_seq = 0x2, |
| 338 | }; |
| 339 | |
| 340 | #ifdef TXRX_DEBUG_DATA |
| 341 | static inline void |
| 342 | ol_txrx_frms_dump(const char *name, |
| 343 | struct ol_txrx_pdev_t *pdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 344 | qdf_nbuf_t frm, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 345 | enum ol_txrx_frm_dump_options display_options, int max_len) |
| 346 | { |
| 347 | #define TXRX_FRM_DUMP_MAX_LEN 128 |
| 348 | uint8_t local_buf[TXRX_FRM_DUMP_MAX_LEN] = { 0 }; |
| 349 | uint8_t *p; |
| 350 | |
| 351 | if (name) { |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 352 | QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, "%s\n", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 353 | name); |
| 354 | } |
| 355 | while (frm) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 356 | p = qdf_nbuf_data(frm); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 357 | if (display_options & ol_txrx_frm_dump_tcp_seq) { |
| 358 | int tcp_offset; |
| 359 | int l2_hdr_size; |
| 360 | uint16_t ethtype; |
| 361 | uint8_t ip_prot; |
| 362 | |
| 363 | if (pdev->frame_format == wlan_frm_fmt_802_3) { |
| 364 | struct ethernet_hdr_t *enet_hdr = |
| 365 | (struct ethernet_hdr_t *)p; |
| 366 | l2_hdr_size = ETHERNET_HDR_LEN; |
| 367 | |
| 368 | /* |
| 369 | * LLC/SNAP present? |
| 370 | */ |
| 371 | ethtype = (enet_hdr->ethertype[0] << 8) | |
| 372 | enet_hdr->ethertype[1]; |
| 373 | if (!IS_ETHERTYPE(ethertype)) { |
| 374 | /* 802.3 format */ |
| 375 | struct llc_snap_hdr_t *llc_hdr; |
| 376 | |
| 377 | llc_hdr = (struct llc_snap_hdr_t *) |
| 378 | (p + l2_hdr_size); |
| 379 | l2_hdr_size += LLC_SNAP_HDR_LEN; |
| 380 | ethtype = (llc_hdr->ethertype[0] << 8) | |
| 381 | llc_hdr->ethertype[1]; |
| 382 | } |
| 383 | } else { |
| 384 | struct llc_snap_hdr_t *llc_hdr; |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 385 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 386 | /* (generic?) 802.11 */ |
| 387 | l2_hdr_size = sizeof(struct ieee80211_frame); |
| 388 | llc_hdr = (struct llc_snap_hdr_t *) |
| 389 | (p + l2_hdr_size); |
| 390 | l2_hdr_size += LLC_SNAP_HDR_LEN; |
| 391 | ethtype = (llc_hdr->ethertype[0] << 8) | |
| 392 | llc_hdr->ethertype[1]; |
| 393 | } |
| 394 | if (ethtype == ETHERTYPE_IPV4) { |
| 395 | struct ipv4_hdr_t *ipv4_hdr; |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 396 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 397 | ipv4_hdr = |
| 398 | (struct ipv4_hdr_t *)(p + l2_hdr_size); |
| 399 | ip_prot = ipv4_hdr->protocol; |
| 400 | tcp_offset = l2_hdr_size + IPV4_HDR_LEN; |
| 401 | } else if (ethtype == ETHERTYPE_IPV6) { |
| 402 | struct ipv6_hdr_t *ipv6_hdr; |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 403 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 404 | ipv6_hdr = |
| 405 | (struct ipv6_hdr_t *)(p + l2_hdr_size); |
| 406 | ip_prot = ipv6_hdr->next_hdr; |
| 407 | tcp_offset = l2_hdr_size + IPV6_HDR_LEN; |
| 408 | } else { |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 409 | QDF_TRACE(QDF_MODULE_ID_TXRX, |
| 410 | QDF_TRACE_LEVEL_INFO, |
Jeff Johnson | c13bfe0 | 2017-09-18 08:16:17 -0700 | [diff] [blame] | 411 | "frame %pK non-IP ethertype (%x)\n", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 412 | frm, ethtype); |
| 413 | goto NOT_IP_TCP; |
| 414 | } |
| 415 | if (ip_prot == IP_PROTOCOL_TCP) { |
| 416 | #if NEVERDEFINED |
| 417 | struct tcp_hdr_t *tcp_hdr; |
| 418 | uint32_t tcp_seq_num; |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 419 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 420 | tcp_hdr = (struct tcp_hdr_t *)(p + tcp_offset); |
| 421 | tcp_seq_num = |
| 422 | (tcp_hdr->seq_num[0] << 24) | |
| 423 | (tcp_hdr->seq_num[1] << 16) | |
| 424 | (tcp_hdr->seq_num[1] << 8) | |
| 425 | (tcp_hdr->seq_num[1] << 0); |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 426 | QDF_TRACE(QDF_MODULE_ID_TXRX, |
| 427 | QDF_TRACE_LEVEL_INFO, |
Jeff Johnson | c13bfe0 | 2017-09-18 08:16:17 -0700 | [diff] [blame] | 428 | "frame %pK: TCP seq num = %d\n", frm, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 429 | tcp_seq_num); |
| 430 | #else |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 431 | QDF_TRACE(QDF_MODULE_ID_TXRX, |
| 432 | QDF_TRACE_LEVEL_INFO, |
Jeff Johnson | c13bfe0 | 2017-09-18 08:16:17 -0700 | [diff] [blame] | 433 | "frame %pK: TCP seq num = %d\n", frm, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 434 | ((*(p + tcp_offset + 4)) << 24) | |
| 435 | ((*(p + tcp_offset + 5)) << 16) | |
| 436 | ((*(p + tcp_offset + 6)) << 8) | |
| 437 | (*(p + tcp_offset + 7))); |
| 438 | #endif |
| 439 | } else { |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 440 | QDF_TRACE(QDF_MODULE_ID_TXRX, |
| 441 | QDF_TRACE_LEVEL_INFO, |
Jeff Johnson | c13bfe0 | 2017-09-18 08:16:17 -0700 | [diff] [blame] | 442 | "frame %pK non-TCP IP protocol (%x)\n", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 443 | frm, ip_prot); |
| 444 | } |
| 445 | } |
| 446 | NOT_IP_TCP: |
| 447 | if (display_options & ol_txrx_frm_dump_contents) { |
| 448 | int i, frag_num, len_lim; |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 449 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 450 | len_lim = max_len; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 451 | if (len_lim > qdf_nbuf_len(frm)) |
| 452 | len_lim = qdf_nbuf_len(frm); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 453 | if (len_lim > TXRX_FRM_DUMP_MAX_LEN) |
| 454 | len_lim = TXRX_FRM_DUMP_MAX_LEN; |
| 455 | |
| 456 | /* |
| 457 | * Gather frame contents from netbuf fragments |
| 458 | * into a contiguous buffer. |
| 459 | */ |
| 460 | frag_num = 0; |
| 461 | i = 0; |
| 462 | while (i < len_lim) { |
| 463 | int frag_bytes; |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 464 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 465 | frag_bytes = |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 466 | qdf_nbuf_get_frag_len(frm, frag_num); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 467 | if (frag_bytes > len_lim - i) |
| 468 | frag_bytes = len_lim - i; |
| 469 | if (frag_bytes > 0) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 470 | p = qdf_nbuf_get_frag_vaddr(frm, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 471 | frag_num); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 472 | qdf_mem_copy(&local_buf[i], p, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 473 | frag_bytes); |
| 474 | } |
| 475 | frag_num++; |
| 476 | i += frag_bytes; |
| 477 | } |
| 478 | |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 479 | QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, |
Jeff Johnson | c13bfe0 | 2017-09-18 08:16:17 -0700 | [diff] [blame] | 480 | "frame %pK data (%pK), hex dump of bytes 0-%d of %d:\n", |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 481 | frm, p, len_lim - 1, (int)qdf_nbuf_len(frm)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 482 | p = local_buf; |
| 483 | while (len_lim > 16) { |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 484 | QDF_TRACE(QDF_MODULE_ID_TXRX, |
| 485 | QDF_TRACE_LEVEL_INFO, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 486 | " " /* indent */ |
| 487 | "%02x %02x %02x %02x %02x %02x %02x %02x " |
| 488 | "%02x %02x %02x %02x %02x %02x %02x %02x\n", |
| 489 | *(p + 0), *(p + 1), *(p + 2), |
| 490 | *(p + 3), *(p + 4), *(p + 5), |
| 491 | *(p + 6), *(p + 7), *(p + 8), |
| 492 | *(p + 9), *(p + 10), *(p + 11), |
| 493 | *(p + 12), *(p + 13), *(p + 14), |
| 494 | *(p + 15)); |
| 495 | p += 16; |
| 496 | len_lim -= 16; |
| 497 | } |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 498 | QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 499 | " " /* indent */); |
| 500 | while (len_lim > 0) { |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 501 | QDF_TRACE(QDF_MODULE_ID_TXRX, |
| 502 | QDF_TRACE_LEVEL_INFO, "%02x ", *p); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 503 | p++; |
| 504 | len_lim--; |
| 505 | } |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 506 | QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 507 | "\n"); |
| 508 | } |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 509 | frm = qdf_nbuf_next(frm); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 510 | } |
| 511 | } |
| 512 | #else |
| 513 | #define ol_txrx_frms_dump(name, pdev, frms, display_options, max_len) |
| 514 | #endif /* TXRX_DEBUG_DATA */ |
| 515 | |
| 516 | #ifdef SUPPORT_HOST_STATISTICS |
| 517 | |
| 518 | #define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast) \ |
| 519 | ol_rx_err_statistics(pdev->ctrl_pdev, vdev->vdev_id, err_type, \ |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 520 | sec_type, is_mcast) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 521 | |
| 522 | #define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type) \ |
| 523 | do { \ |
| 524 | int is_mcast; \ |
| 525 | enum htt_sec_type sec_type; \ |
| 526 | is_mcast = htt_rx_msdu_is_wlan_mcast( \ |
| 527 | pdev->htt_pdev, rx_desc); \ |
| 528 | sec_type = peer->security[is_mcast \ |
| 529 | ? txrx_sec_mcast \ |
| 530 | : txrx_sec_ucast].sec_type; \ |
| 531 | OL_RX_ERR_STATISTICS(pdev, vdev, err_type, \ |
| 532 | pdev->sec_types[sec_type], \ |
| 533 | is_mcast); \ |
| 534 | } while (false) |
| 535 | |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 536 | #ifdef CONFIG_HL_SUPPORT |
| 537 | |
| 538 | /** |
| 539 | * ol_rx_err_inv_get_wifi_header() - retrieve wifi header |
| 540 | * @pdev: handle to the physical device |
| 541 | * @rx_msdu: msdu of which header needs to be retrieved |
| 542 | * |
| 543 | * Return: wifi header |
| 544 | */ |
| 545 | static inline |
| 546 | struct ieee80211_frame *ol_rx_err_inv_get_wifi_header( |
| 547 | struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu) |
| 548 | { |
| 549 | return NULL; |
| 550 | } |
| 551 | #else |
| 552 | |
| 553 | static inline |
| 554 | struct ieee80211_frame *ol_rx_err_inv_get_wifi_header( |
| 555 | struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu) |
| 556 | { |
| 557 | struct ieee80211_frame *wh = NULL; |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 558 | |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 559 | if (ol_cfg_frame_type(pdev) == wlan_frm_fmt_native_wifi) |
| 560 | /* For windows, it is always native wifi header .*/ |
| 561 | wh = (struct ieee80211_frame *)qdf_nbuf_data(rx_msdu); |
| 562 | |
| 563 | return wh; |
| 564 | } |
| 565 | #endif |
| 566 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 567 | #define OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu) \ |
| 568 | do { \ |
| 569 | struct ieee80211_frame *wh = NULL; \ |
| 570 | /*FIX THIS : */ \ |
| 571 | /* Here htt_rx_mpdu_wifi_hdr_retrieve should be used. */ \ |
| 572 | /*But at present it seems it does not work.*/ \ |
| 573 | /*wh = (struct ieee80211_frame *) */ \ |
| 574 | /*htt_rx_mpdu_wifi_hdr_retrieve(pdev->htt_pdev, rx_desc);*/ \ |
| 575 | /* this only apply to LL device.*/ \ |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 576 | wh = ol_rx_err_inv_get_wifi_header(pdev->ctrl_pdev, rx_msdu); \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 577 | ol_rx_err_inv_peer_statistics(pdev->ctrl_pdev, \ |
| 578 | wh, OL_RX_ERR_UNKNOWN_PEER); \ |
| 579 | } while (false) |
| 580 | |
| 581 | #define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status) \ |
| 582 | do { \ |
| 583 | enum ol_rx_err_type err_type = OL_RX_ERR_NONE; \ |
| 584 | if (rx_status == htt_rx_status_decrypt_err) \ |
| 585 | err_type = OL_RX_ERR_DECRYPT; \ |
| 586 | else if (rx_status == htt_rx_status_tkip_mic_err) \ |
| 587 | err_type = OL_RX_ERR_TKIP_MIC; \ |
| 588 | else if (rx_status == htt_rx_status_mpdu_length_err) \ |
| 589 | err_type = OL_RX_ERR_MPDU_LENGTH; \ |
| 590 | else if (rx_status == htt_rx_status_mpdu_encrypt_required_err) \ |
| 591 | err_type = OL_RX_ERR_ENCRYPT_REQUIRED; \ |
| 592 | else if (rx_status == htt_rx_status_err_dup) \ |
| 593 | err_type = OL_RX_ERR_DUP; \ |
| 594 | else if (rx_status == htt_rx_status_err_fcs) \ |
| 595 | err_type = OL_RX_ERR_FCS; \ |
| 596 | else \ |
| 597 | err_type = OL_RX_ERR_UNKNOWN; \ |
| 598 | \ |
| 599 | if (vdev != NULL && peer != NULL) { \ |
| 600 | OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, \ |
| 601 | rx_mpdu_desc, err_type); \ |
| 602 | } else { \ |
| 603 | OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu); \ |
| 604 | } \ |
| 605 | } while (false) |
| 606 | #else |
| 607 | #define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast) |
| 608 | #define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type) |
| 609 | #define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status) |
| 610 | #endif /* SUPPORT_HOST_STATISTICS */ |
| 611 | |
| 612 | #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS |
| 613 | #define OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, type, msdu) \ |
| 614 | do { \ |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 615 | qdf_spin_lock_bh(&peer->vdev->pdev->peer_stat_mutex); \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 616 | peer->stats.tx_or_rx.frms.type += 1; \ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 617 | peer->stats.tx_or_rx.bytes.type += qdf_nbuf_len(msdu); \ |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 618 | qdf_spin_unlock_bh(&peer->vdev->pdev->peer_stat_mutex); \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 619 | } while (0) |
/*
 * OL_TXRX_PEER_STATS_UPDATE() - classify an MSDU as bcast/mcast/ucast by
 * its destination address and bump the matching per-peer frame and byte
 * counters via OL_TXRX_PEER_STATS_UPDATE_BASE().
 *
 * The DA is read directly from the frame: at the start of the buffer for
 * 802.3 frames, or from the 802.11 header otherwise (i_addr1 in AP mode,
 * i_addr3 for other opmodes — NOTE(review): assumes the To-DS/From-DS
 * layout matches the opmode; confirm against the rx/tx callers).
 * tx_or_rx must be the literal token tx or rx (pasted into member names).
 */
#define OL_TXRX_PEER_STATS_UPDATE(peer, tx_or_rx, msdu) \
	do { \
		struct ol_txrx_vdev_t *vdev = peer->vdev; \
		struct ol_txrx_pdev_t *pdev = vdev->pdev; \
		uint8_t *dest_addr; \
		if (pdev->frame_format == wlan_frm_fmt_802_3) { \
			dest_addr = qdf_nbuf_data(msdu); \
		} else { /* 802.11 format */ \
			struct ieee80211_frame *frm; \
			frm = (struct ieee80211_frame *) qdf_nbuf_data(msdu); \
			if (vdev->opmode == wlan_op_mode_ap) { \
				dest_addr = (uint8_t *) &(frm->i_addr1[0]); \
			} else { \
				dest_addr = (uint8_t *) &(frm->i_addr3[0]); \
			} \
		} \
		if (qdf_unlikely(IEEE80211_IS_BROADCAST(dest_addr))) { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       bcast, msdu); \
		} else if (qdf_unlikely(IEEE80211_IS_MULTICAST(dest_addr))) { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       mcast, msdu); \
		} else { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       ucast, msdu); \
		} \
	} while (0)
/* Convenience wrappers: account one tx / rx MSDU against @peer. */
#define OL_TX_PEER_STATS_UPDATE(peer, msdu) \
	OL_TXRX_PEER_STATS_UPDATE(peer, tx, msdu)
#define OL_RX_PEER_STATS_UPDATE(peer, msdu) \
	OL_TXRX_PEER_STATS_UPDATE(peer, rx, msdu)
/* Create/destroy the spinlock that serializes peer stats updates. */
#define OL_TXRX_PEER_STATS_MUTEX_INIT(pdev) \
	qdf_spinlock_create(&pdev->peer_stat_mutex)
#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev) \
	qdf_spinlock_destroy(&pdev->peer_stat_mutex)
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 655 | #else |
/*
 * QCA_ENABLE_OL_TXRX_PEER_STATS disabled: peer stats accounting compiles
 * out.  The mutex stubs take @pdev (not @peer) to match the signatures
 * of the enabled definitions above.
 */
#define OL_TX_PEER_STATS_UPDATE(peer, msdu)	/* no-op */
#define OL_RX_PEER_STATS_UPDATE(peer, msdu)	/* no-op */
#define OL_TXRX_PEER_STATS_MUTEX_INIT(pdev)	/* no-op */
#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev)	/* no-op */
| 660 | #endif |
| 661 | |
/* Default value for the HTT credit debug flag when the build leaves it
 * undefined: disabled.
 */
#ifndef DEBUG_HTT_CREDIT
#define DEBUG_HTT_CREDIT 0
#endif
| 665 | |
| 666 | #if defined(FEATURE_TSO_DEBUG) |
Nirav Shah | da00834 | 2016-05-17 18:50:40 +0530 | [diff] [blame] | 667 | #define TXRX_STATS_TSO_HISTOGRAM(_pdev, _p_cntrs) \ |
| 668 | do { \ |
| 669 | if (_p_cntrs == 1) { \ |
| 670 | TXRX_STATS_ADD(_pdev, pub.tx.tso.tso_hist.pkts_1, 1); \ |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 671 | } else if (_p_cntrs >= 2 && _p_cntrs <= 5) { \ |
Nirav Shah | da00834 | 2016-05-17 18:50:40 +0530 | [diff] [blame] | 672 | TXRX_STATS_ADD(_pdev, \ |
| 673 | pub.tx.tso.tso_hist.pkts_2_5, 1); \ |
| 674 | } else if (_p_cntrs > 5 && _p_cntrs <= 10) { \ |
| 675 | TXRX_STATS_ADD(_pdev, \ |
| 676 | pub.tx.tso.tso_hist.pkts_6_10, 1); \ |
| 677 | } else if (_p_cntrs > 10 && _p_cntrs <= 15) { \ |
| 678 | TXRX_STATS_ADD(_pdev, \ |
| 679 | pub.tx.tso.tso_hist.pkts_11_15, 1); \ |
| 680 | } else if (_p_cntrs > 15 && _p_cntrs <= 20) { \ |
| 681 | TXRX_STATS_ADD(_pdev, \ |
| 682 | pub.tx.tso.tso_hist.pkts_16_20, 1); \ |
| 683 | } else if (_p_cntrs > 20) { \ |
| 684 | TXRX_STATS_ADD(_pdev, \ |
| 685 | pub.tx.tso.tso_hist.pkts_20_plus, 1); \ |
| 686 | } \ |
| 687 | } while (0) |
| 688 | |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 689 | #define TXRX_STATS_TSO_RESET_MSDU(pdev, idx) \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 690 | do { \ |
Yun Park | 641304c | 2017-04-09 10:16:11 -0700 | [diff] [blame] | 691 | pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg \ |
| 692 | = 0; \ |
| 693 | pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].tso_seg_idx \ |
| 694 | = 0; \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 695 | } while (0) |
| 696 | |
/* Index of the TSO MSDU stats slot currently being filled. */
#define TXRX_STATS_TSO_MSDU_IDX(pdev) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx

/* TSO bookkeeping record for MSDU slot @idx (lvalue). */
#define TXRX_STATS_TSO_MSDU(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx]

/* Segment count recorded for MSDU slot @idx. */
#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg

/* gso_size recorded for MSDU slot @idx. */
#define TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].gso_size

/* Total length recorded for MSDU slot @idx. */
#define TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].total_len

/* Fragment count recorded for MSDU slot @idx. */
#define TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].nr_frags
| 714 | |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 715 | #define TXRX_STATS_TSO_CURR_MSDU(pdev, idx) \ |
| 716 | TXRX_STATS_TSO_MSDU(pdev, idx) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 717 | |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 718 | #define TXRX_STATS_TSO_SEG_IDX(pdev, idx) \ |
| 719 | TXRX_STATS_TSO_CURR_MSDU(pdev, idx).tso_seg_idx |
| 720 | |
| 721 | #define TXRX_STATS_TSO_INC_SEG(pdev, idx) \ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 722 | do { \ |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 723 | TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg++; \ |
| 724 | TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg &= \ |
Nirav Shah | da00834 | 2016-05-17 18:50:40 +0530 | [diff] [blame] | 725 | NUM_MAX_TSO_SEGS_MASK; \ |
| 726 | } while (0) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 727 | |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 728 | #define TXRX_STATS_TSO_RST_SEG(pdev, idx) \ |
| 729 | TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg = 0 |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 730 | |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 731 | #define TXRX_STATS_TSO_RST_SEG_IDX(pdev, idx) \ |
| 732 | TXRX_STATS_TSO_CURR_MSDU(pdev, idx).tso_seg_idx = 0 |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 733 | |
/* TSO segment record @seg_idx within MSDU slot @msdu_idx (lvalue). */
#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) \
	TXRX_STATS_TSO_MSDU(pdev, msdu_idx).tso_segs[seg_idx]

/* TSO segment record at the current segment cursor of MSDU slot @idx.
 * Fix: dropped the stray trailing line-continuation that previously
 * ended this macro; it silently spliced the following source line into
 * the macro body.
 */
#define TXRX_STATS_TSO_CURR_SEG(pdev, idx) \
	TXRX_STATS_TSO_SEG(pdev, idx, \
			   TXRX_STATS_TSO_SEG_IDX(pdev, idx))

/* Advance the segment cursor for slot @idx, wrapping via
 * NUM_MAX_TSO_SEGS_MASK.
 */
#define TXRX_STATS_TSO_INC_SEG_IDX(pdev, idx) \
	do { \
		TXRX_STATS_TSO_SEG_IDX(pdev, idx)++; \
		TXRX_STATS_TSO_SEG_IDX(pdev, idx) &= NUM_MAX_TSO_SEGS_MASK; \
	} while (0)
| 746 | |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 747 | #define TXRX_STATS_TSO_SEG_UPDATE(pdev, idx, tso_seg) \ |
| 748 | (TXRX_STATS_TSO_CURR_SEG(pdev, idx) = tso_seg) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 749 | |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 750 | #define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, idx, size) \ |
| 751 | (TXRX_STATS_TSO_CURR_MSDU(pdev, idx).gso_size = size) |
Nirav Shah | da00834 | 2016-05-17 18:50:40 +0530 | [diff] [blame] | 752 | |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 753 | #define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, idx, len) \ |
| 754 | (TXRX_STATS_TSO_CURR_MSDU(pdev, idx).total_len = len) |
Nirav Shah | da00834 | 2016-05-17 18:50:40 +0530 | [diff] [blame] | 755 | |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 756 | #define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, idx, frags) \ |
| 757 | (TXRX_STATS_TSO_CURR_MSDU(pdev, idx).nr_frags = frags) |
Nirav Shah | da00834 | 2016-05-17 18:50:40 +0530 | [diff] [blame] | 758 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 759 | #else |
Nirav Shah | da00834 | 2016-05-17 18:50:40 +0530 | [diff] [blame] | 760 | #define TXRX_STATS_TSO_HISTOGRAM(_pdev, _p_cntrs) /* no-op */ |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 761 | #define TXRX_STATS_TSO_RESET_MSDU(pdev, idx) /* no-op */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 762 | #define TXRX_STATS_TSO_MSDU_IDX(pdev) /* no-op */ |
| 763 | #define TXRX_STATS_TSO_MSDU(pdev, idx) /* no-op */ |
| 764 | #define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) /* no-op */ |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 765 | #define TXRX_STATS_TSO_CURR_MSDU(pdev, idx) /* no-op */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 766 | #define TXRX_STATS_TSO_INC_MSDU_IDX(pdev) /* no-op */ |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 767 | #define TXRX_STATS_TSO_SEG_IDX(pdev, idx) /* no-op */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 768 | #define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) /* no-op */ |
Himanshu Agarwal | 5501c19 | 2017-02-14 11:39:39 +0530 | [diff] [blame] | 769 | #define TXRX_STATS_TSO_CURR_SEG(pdev, idx) /* no-op */ |
| 770 | #define TXRX_STATS_TSO_INC_SEG_IDX(pdev, idx) /* no-op */ |
| 771 | #define TXRX_STATS_TSO_SEG_UPDATE(pdev, idx, tso_seg) /* no-op */ |
| 772 | #define TXRX_STATS_TSO_INC_SEG(pdev, idx) /* no-op */ |
| 773 | #define TXRX_STATS_TSO_RST_SEG(pdev, idx) /* no-op */ |
| 774 | #define TXRX_STATS_TSO_RST_SEG_IDX(pdev, idx) /* no-op */ |
| 775 | #define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, idx, size) /* no-op */ |
| 776 | #define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, idx, len) /* no-op */ |
| 777 | #define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, idx, frags) /* no-op */ |
Nirav Shah | da00834 | 2016-05-17 18:50:40 +0530 | [diff] [blame] | 778 | #define TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, idx) /* no-op */ |
| 779 | #define TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, idx) /* no-op */ |
| 780 | #define TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, idx) /* no-op */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 781 | |
| 782 | #endif /* FEATURE_TSO_DEBUG */ |
| 783 | |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 784 | #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL |
| 785 | |
| 786 | void |
| 787 | ol_txrx_update_group_credit( |
| 788 | struct ol_tx_queue_group_t *group, |
| 789 | int32_t credit, |
| 790 | u_int8_t absolute); |
| 791 | #endif |
| 792 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 793 | #endif /* _OL_TXRX_INTERNAL__H_ */ |