Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
Ashish Kumar Dhanotiya | 94ffbd1 | 2019-08-08 18:00:59 +0530 | [diff] [blame^] | 2 | * Copyright (c) 2011, 2014-2019 The Linux Foundation. All rights reserved. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3 | * |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 16 | * PERFORMANCE OF THIS SOFTWARE. |
| 17 | */ |
| 18 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 19 | /** |
| 20 | * @file ol_tx_desc.h |
| 21 | * @brief API definitions for the tx descriptor module within the data SW. |
| 22 | */ |
| 23 | #ifndef _OL_TX_DESC__H_ |
| 24 | #define _OL_TX_DESC__H_ |
| 25 | |
Ashish Kumar Dhanotiya | 94ffbd1 | 2019-08-08 18:00:59 +0530 | [diff] [blame^] | 26 | #include "queue.h" /* TAILQ_HEAD */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 27 | #include <qdf_nbuf.h> /* qdf_nbuf_t */ |
Dhanashri Atre | 12a0839 | 2016-02-17 13:10:34 -0800 | [diff] [blame] | 28 | #include <cdp_txrx_cmn.h> /* ol_txrx_vdev_t, etc. */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 29 | #include <ol_txrx_internal.h> /*TXRX_ASSERT2 */ |
Nirav Shah | 2e583a0 | 2016-04-30 14:06:12 +0530 | [diff] [blame] | 30 | #include <ol_htt_tx_api.h> |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 31 | |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 32 | #define DIV_BY_8 3 |
| 33 | #define DIV_BY_32 5 |
| 34 | #define MOD_BY_8 0x7 |
| 35 | #define MOD_BY_32 0x1F |
| 36 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 37 | struct ol_tx_desc_t * |
| 38 | ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev, |
| 39 | struct ol_txrx_vdev_t *vdev, |
| 40 | struct ol_txrx_msdu_info_t *msdu_info); |
| 41 | |
| 42 | |
| 43 | /** |
| 44 | * @brief Allocate and initialize a tx descriptor for a LL system. |
| 45 | * @details |
| 46 | * Allocate a tx descriptor pair for a new tx frame - a SW tx descriptor |
| 47 | * for private use within the host data SW, and a HTT tx descriptor for |
| 48 | * downloading tx meta-data to the target FW/HW. |
| 49 | * Fill in the fields of this pair of tx descriptors based on the |
| 50 | * information in the netbuf. |
| 51 | * For LL, this includes filling in a fragmentation descriptor to |
| 52 | * specify to the MAC HW where to find the tx frame's fragments. |
| 53 | * |
| 54 | * @param pdev - the data physical device sending the data |
| 55 | * (for accessing the tx desc pool) |
| 56 | * @param vdev - the virtual device sending the data |
| 57 | * (for specifying the transmitter address for multicast / broadcast data) |
| 58 | * @param netbuf - the tx frame |
| 59 | * @param msdu_info - tx meta-data |
| 60 | */ |
| 61 | struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev, |
| 62 | struct ol_txrx_vdev_t *vdev, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 63 | qdf_nbuf_t netbuf, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 64 | struct ol_txrx_msdu_info_t *msdu_info); |
| 65 | |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 66 | |
| 67 | /** |
| 68 | * @brief Allocate and initialize a tx descriptor for a HL system. |
| 69 | * @details |
| 70 | * Allocate a tx descriptor pair for a new tx frame - a SW tx descriptor |
| 71 | * for private use within the host data SW, and a HTT tx descriptor for |
| 72 | * downloading tx meta-data to the target FW/HW. |
| 73 | * Fill in the fields of this pair of tx descriptors based on the |
| 74 | * information in the netbuf. |
| 75 | * |
| 76 | * @param pdev - the data physical device sending the data |
| 77 | * (for accessing the tx desc pool) |
| 78 | * @param vdev - the virtual device sending the data |
| 79 | * (for specifying the transmitter address for multicast / broadcast data) |
| 80 | * @param netbuf - the tx frame |
| 81 | * @param msdu_info - tx meta-data |
| 82 | */ |
| 83 | struct ol_tx_desc_t * |
| 84 | ol_tx_desc_hl( |
| 85 | struct ol_txrx_pdev_t *pdev, |
| 86 | struct ol_txrx_vdev_t *vdev, |
| 87 | qdf_nbuf_t netbuf, |
| 88 | struct ol_txrx_msdu_info_t *msdu_info); |
| 89 | |
| 90 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 91 | /** |
Jeff Johnson | 3dca222 | 2018-05-12 15:10:43 -0700 | [diff] [blame] | 92 | * @brief Use a tx descriptor ID to find the corresponding descriptor object. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 93 | * |
| 94 | * @param pdev - the data physical device sending the data |
| 95 | * @param tx_desc_id - the ID of the descriptor in question |
| 96 | * @return the descriptor object that has the specified ID |
| 97 | */ |
Leo Chang | 376398b | 2015-10-23 14:19:02 -0700 | [diff] [blame] | 98 | static inline struct ol_tx_desc_t *ol_tx_desc_find( |
| 99 | struct ol_txrx_pdev_t *pdev, uint16_t tx_desc_id) |
| 100 | { |
| 101 | void **td_base = (void **)pdev->tx_desc.desc_pages.cacheable_pages; |
| 102 | |
| 103 | return &((union ol_tx_desc_list_elem_t *) |
| 104 | (td_base[tx_desc_id >> pdev->tx_desc.page_divider] + |
| 105 | (pdev->tx_desc.desc_reserved_size * |
| 106 | (tx_desc_id & pdev->tx_desc.offset_filter))))->tx_desc; |
| 107 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 108 | |
| 109 | /** |
Jeff Johnson | 3dca222 | 2018-05-12 15:10:43 -0700 | [diff] [blame] | 110 | * @brief Use a tx descriptor ID to find the corresponding descriptor object |
gbian | e55c956 | 2016-11-01 14:47:47 +0800 | [diff] [blame] | 111 | * and add sanity check. |
| 112 | * |
| 113 | * @param pdev - the data physical device sending the data |
| 114 | * @param tx_desc_id - the ID of the descriptor in question |
| 115 | * @return the descriptor object that has the specified ID, |
| 116 | * if failure, will return NULL. |
| 117 | */ |
| 118 | |
| 119 | #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS |
| 120 | static inline struct ol_tx_desc_t * |
| 121 | ol_tx_desc_find_check(struct ol_txrx_pdev_t *pdev, u_int16_t tx_desc_id) |
| 122 | { |
| 123 | struct ol_tx_desc_t *tx_desc; |
| 124 | |
Tiger Yu | 6a10e3e | 2017-12-28 11:01:34 +0800 | [diff] [blame] | 125 | if (tx_desc_id >= pdev->tx_desc.pool_size) |
| 126 | return NULL; |
| 127 | |
gbian | e55c956 | 2016-11-01 14:47:47 +0800 | [diff] [blame] | 128 | tx_desc = ol_tx_desc_find(pdev, tx_desc_id); |
| 129 | |
Yun Park | cb0bb18 | 2017-04-06 22:23:20 -0700 | [diff] [blame] | 130 | if (tx_desc->pkt_type == ol_tx_frm_freed) |
gbian | e55c956 | 2016-11-01 14:47:47 +0800 | [diff] [blame] | 131 | return NULL; |
gbian | e55c956 | 2016-11-01 14:47:47 +0800 | [diff] [blame] | 132 | |
| 133 | return tx_desc; |
| 134 | } |
| 135 | |
| 136 | #else |
| 137 | |
| 138 | static inline struct ol_tx_desc_t * |
| 139 | ol_tx_desc_find_check(struct ol_txrx_pdev_t *pdev, u_int16_t tx_desc_id) |
| 140 | { |
jiad | b206277 | 2017-05-19 10:57:26 +0800 | [diff] [blame] | 141 | struct ol_tx_desc_t *tx_desc; |
| 142 | |
Tiger Yu | 6a10e3e | 2017-12-28 11:01:34 +0800 | [diff] [blame] | 143 | if (tx_desc_id >= pdev->tx_desc.pool_size) |
| 144 | return NULL; |
| 145 | |
jiad | b206277 | 2017-05-19 10:57:26 +0800 | [diff] [blame] | 146 | tx_desc = ol_tx_desc_find(pdev, tx_desc_id); |
| 147 | |
| 148 | /* check against invalid tx_desc_id */ |
| 149 | if (ol_cfg_is_high_latency(pdev->ctrl_pdev) && !tx_desc->vdev) |
| 150 | return NULL; |
| 151 | |
| 152 | return tx_desc; |
gbian | e55c956 | 2016-11-01 14:47:47 +0800 | [diff] [blame] | 153 | } |
| 154 | #endif |
| 155 | |
| 156 | /** |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 157 | * @brief Free a list of tx descriptors and the tx frames they refer to. |
| 158 | * @details |
| 159 | * Free a batch of "standard" tx descriptors and their tx frames. |
| 160 | * Free each tx descriptor, by returning it to the freelist. |
| 161 | * Unmap each netbuf, and free the netbufs as a batch. |
Jeff Johnson | 3dca222 | 2018-05-12 15:10:43 -0700 | [diff] [blame] | 162 | * Irregular tx frames like TSO or management frames that require |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 163 | * special handling are processed by the ol_tx_desc_frame_free_nonstd |
| 164 | * function rather than this function. |
| 165 | * |
| 166 | * @param pdev - the data physical device that sent the data |
| 167 | * @param tx_descs - a list of SW tx descriptors for the tx frames |
| 168 | * @param had_error - bool indication of whether the transmission failed. |
| 169 | * This is provided to callback functions that get notified of |
| 170 | * the tx frame completion. |
| 171 | */ |
| 172 | void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev, |
| 173 | ol_tx_desc_list *tx_descs, int had_error); |
| 174 | |
| 175 | /** |
| 176 | * @brief Free a non-standard tx frame and its tx descriptor. |
| 177 | * @details |
| 178 | * Check the tx frame type (e.g. TSO vs. management) to determine what |
| 179 | * special steps, if any, need to be performed prior to freeing the |
| 180 | * tx frame and its tx descriptor. |
| 181 | * This function can also be used to free single standard tx frames. |
| 182 | * After performing any special steps based on tx frame type, free the |
| 183 | * tx descriptor, i.e. return it to the freelist, and unmap and |
| 184 | * free the netbuf referenced by the tx descriptor. |
| 185 | * |
| 186 | * @param pdev - the data physical device that sent the data |
| 187 | * @param tx_desc - the SW tx descriptor for the tx frame that was sent |
| 188 | * @param had_error - bool indication of whether the transmission failed. |
| 189 | * This is provided to callback functions that get notified of |
| 190 | * the tx frame completion. |
| 191 | */ |
| 192 | void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev, |
| 193 | struct ol_tx_desc_t *tx_desc, int had_error); |
| 194 | |
| 195 | /* |
| 196 | * @brief Determine the ID of a tx descriptor. |
| 197 | * |
| 198 | * @param pdev - the physical device that is sending the data |
| 199 | * @param tx_desc - the descriptor whose ID is being determined |
| 200 | * @return numeric ID that uniquely identifies the tx descriptor |
| 201 | */ |
| 202 | static inline uint16_t |
| 203 | ol_tx_desc_id(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc) |
| 204 | { |
Leo Chang | 376398b | 2015-10-23 14:19:02 -0700 | [diff] [blame] | 205 | TXRX_ASSERT2(tx_desc->id < pdev->tx_desc.pool_size); |
| 206 | return tx_desc->id; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 207 | } |
| 208 | |
/*
 * @brief Retrieves the beacon header for the vdev
 * @param cfg_pdev - opaque configuration pdev handle
 * @param vdev_id - vdev id
 * @return void pointer to the beacon header for the given vdev
 */
| 215 | |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 216 | void *ol_ath_get_bcn_header(struct cdp_cfg *cfg_pdev, A_UINT32 vdev_id); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 217 | |
/*
 * @brief Free a tx descriptor, without freeing the matching frame.
 * @details
 * This function is used during the function call that submits tx frames
 * into the txrx layer, for cases where a tx descriptor is successfully
 * allocated, but for other reasons the frame could not be accepted.
 *
 * @param pdev - the data physical device that is sending the data
 * @param tx_desc - the descriptor being freed
 */
| 228 | void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc); |
| 229 | |
| 230 | #if defined(FEATURE_TSO) |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 231 | struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 232 | |
| 233 | void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev, |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 234 | struct qdf_tso_seg_elem_t *tso_seg); |
Poddar, Siddarth | 3f1fb13 | 2017-01-12 17:25:52 +0530 | [diff] [blame] | 235 | struct qdf_tso_num_seg_elem_t *ol_tso_num_seg_alloc( |
| 236 | struct ol_txrx_pdev_t *pdev); |
| 237 | void ol_tso_num_seg_free(struct ol_txrx_pdev_t *pdev, |
| 238 | struct qdf_tso_num_seg_elem_t *tso_num_seg); |
Himanshu Agarwal | 0e90a7b | 2017-03-24 17:28:40 +0530 | [diff] [blame] | 239 | void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev, |
Poddar, Siddarth | 39ba8e0 | 2017-03-03 16:46:39 +0530 | [diff] [blame] | 240 | struct ol_txrx_msdu_info_t *msdu_info, |
| 241 | bool is_tso_seg_mapping_done); |
Poddar, Siddarth | 3f1fb13 | 2017-01-12 17:25:52 +0530 | [diff] [blame] | 242 | |
Dhanashri Atre | 83d373d | 2015-07-28 16:45:59 -0700 | [diff] [blame] | 243 | #else |
| 244 | #define ol_tso_alloc_segment(pdev) /*no-op*/ |
| 245 | #define ol_tso_free_segment(pdev, tso_seg) /*no-op*/ |
Poddar, Siddarth | 3f1fb13 | 2017-01-12 17:25:52 +0530 | [diff] [blame] | 246 | #define ol_tso_num_seg_alloc(pdev) /*no-op*/ |
| 247 | #define ol_tso_num_seg_free(pdev, tso_num_seg) /*no-op*/ |
Poddar, Siddarth | 39ba8e0 | 2017-03-03 16:46:39 +0530 | [diff] [blame] | 248 | /*no-op*/ |
| 249 | #define ol_free_remaining_tso_segs(vdev, msdu_info, is_tso_seg_mapping_done) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 250 | #endif |
| 251 | |
Nirav Shah | 9d7f2e8 | 2015-09-28 11:09:09 -0700 | [diff] [blame] | 252 | /** |
| 253 | * ol_tx_get_desc_global_pool() - get descriptor from global pool |
| 254 | * @pdev: pdev handler |
| 255 | * |
| 256 | * Caller needs to take lock and do sanity checks. |
| 257 | * |
| 258 | * Return: tx descriptor |
| 259 | */ |
| 260 | static inline |
| 261 | struct ol_tx_desc_t *ol_tx_get_desc_global_pool(struct ol_txrx_pdev_t *pdev) |
| 262 | { |
| 263 | struct ol_tx_desc_t *tx_desc = &pdev->tx_desc.freelist->tx_desc; |
Yun Park | cb0bb18 | 2017-04-06 22:23:20 -0700 | [diff] [blame] | 264 | |
Nirav Shah | 9d7f2e8 | 2015-09-28 11:09:09 -0700 | [diff] [blame] | 265 | pdev->tx_desc.freelist = pdev->tx_desc.freelist->next; |
| 266 | pdev->tx_desc.num_free--; |
| 267 | return tx_desc; |
| 268 | } |
| 269 | |
| 270 | /** |
| 271 | * ol_tx_put_desc_global_pool() - put descriptor to global pool freelist |
| 272 | * @pdev: pdev handle |
| 273 | * @tx_desc: tx descriptor |
| 274 | * |
| 275 | * Caller needs to take lock and do sanity checks. |
| 276 | * |
| 277 | * Return: none |
| 278 | */ |
| 279 | static inline |
| 280 | void ol_tx_put_desc_global_pool(struct ol_txrx_pdev_t *pdev, |
| 281 | struct ol_tx_desc_t *tx_desc) |
| 282 | { |
| 283 | ((union ol_tx_desc_list_elem_t *)tx_desc)->next = |
| 284 | pdev->tx_desc.freelist; |
| 285 | pdev->tx_desc.freelist = |
| 286 | (union ol_tx_desc_list_elem_t *)tx_desc; |
| 287 | pdev->tx_desc.num_free++; |
Nirav Shah | 9d7f2e8 | 2015-09-28 11:09:09 -0700 | [diff] [blame] | 288 | } |
| 289 | |
| 290 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 291 | #ifdef QCA_LL_TX_FLOW_CONTROL_V2 |
Nirav Shah | a3cc719 | 2018-03-23 00:03:24 +0530 | [diff] [blame] | 292 | |
| 293 | #ifdef QCA_LL_TX_FLOW_CONTROL_RESIZE |
| 294 | int ol_tx_distribute_descs_to_deficient_pools_from_global_pool(void); |
| 295 | #else |
/* flow-pool resize support disabled: nothing to redistribute */
static inline
int ol_tx_distribute_descs_to_deficient_pools_from_global_pool(void)
{
	return 0;
}
| 301 | #endif |
| 302 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 303 | int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool); |
Nirav Shah | 9d7f2e8 | 2015-09-28 11:09:09 -0700 | [diff] [blame] | 304 | /** |
| 305 | * ol_tx_get_desc_flow_pool() - get descriptor from flow pool |
| 306 | * @pool: flow pool |
| 307 | * |
| 308 | * Caller needs to take lock and do sanity checks. |
| 309 | * |
| 310 | * Return: tx descriptor |
| 311 | */ |
| 312 | static inline |
| 313 | struct ol_tx_desc_t *ol_tx_get_desc_flow_pool(struct ol_tx_flow_pool_t *pool) |
| 314 | { |
| 315 | struct ol_tx_desc_t *tx_desc = &pool->freelist->tx_desc; |
Yun Park | cb0bb18 | 2017-04-06 22:23:20 -0700 | [diff] [blame] | 316 | |
Nirav Shah | 9d7f2e8 | 2015-09-28 11:09:09 -0700 | [diff] [blame] | 317 | pool->freelist = pool->freelist->next; |
| 318 | pool->avail_desc--; |
| 319 | return tx_desc; |
| 320 | } |
| 321 | |
| 322 | /** |
| 323 | * ol_tx_put_desc_flow_pool() - put descriptor to flow pool freelist |
| 324 | * @pool: flow pool |
| 325 | * @tx_desc: tx descriptor |
| 326 | * |
| 327 | * Caller needs to take lock and do sanity checks. |
| 328 | * |
| 329 | * Return: none |
| 330 | */ |
| 331 | static inline |
| 332 | void ol_tx_put_desc_flow_pool(struct ol_tx_flow_pool_t *pool, |
| 333 | struct ol_tx_desc_t *tx_desc) |
| 334 | { |
| 335 | tx_desc->pool = pool; |
| 336 | ((union ol_tx_desc_list_elem_t *)tx_desc)->next = pool->freelist; |
| 337 | pool->freelist = (union ol_tx_desc_list_elem_t *)tx_desc; |
| 338 | pool->avail_desc++; |
Nirav Shah | 9d7f2e8 | 2015-09-28 11:09:09 -0700 | [diff] [blame] | 339 | } |
| 340 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 341 | #else |
| 342 | static inline int ol_tx_free_invalid_flow_pool(void *pool) |
| 343 | { |
| 344 | return 0; |
| 345 | } |
| 346 | #endif |
| 347 | |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 348 | #ifdef DESC_DUP_DETECT_DEBUG |
| 349 | /** |
| 350 | * ol_tx_desc_dup_detect_init() - initialize descriptor duplication logic |
| 351 | * @pdev: pdev handle |
| 352 | * @pool_size: global pool size |
| 353 | * |
| 354 | * Return: none |
| 355 | */ |
| 356 | static inline |
| 357 | void ol_tx_desc_dup_detect_init(struct ol_txrx_pdev_t *pdev, uint16_t pool_size) |
| 358 | { |
| 359 | uint16_t size = (pool_size >> DIV_BY_8) + |
Houston Hoffman | 088e4b9 | 2016-09-01 13:51:06 -0700 | [diff] [blame] | 360 | sizeof(*pdev->tx_desc.free_list_bitmap); |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 361 | pdev->tx_desc.free_list_bitmap = qdf_mem_malloc(size); |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 362 | } |
| 363 | |
| 364 | /** |
| 365 | * ol_tx_desc_dup_detect_deinit() - deinit descriptor duplication logic |
| 366 | * @pdev: pdev handle |
| 367 | * |
| 368 | * Return: none |
| 369 | */ |
| 370 | static inline |
| 371 | void ol_tx_desc_dup_detect_deinit(struct ol_txrx_pdev_t *pdev) |
| 372 | { |
Srinivas Girigowda | b8ecec2 | 2017-03-09 15:02:59 -0800 | [diff] [blame] | 373 | QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, |
| 374 | "%s: pool_size %d num_free %d\n", __func__, |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 375 | pdev->tx_desc.pool_size, pdev->tx_desc.num_free); |
| 376 | if (pdev->tx_desc.free_list_bitmap) |
| 377 | qdf_mem_free(pdev->tx_desc.free_list_bitmap); |
| 378 | } |
| 379 | |
| 380 | /** |
| 381 | * ol_tx_desc_dup_detect_set() - set bit for msdu_id |
| 382 | * @pdev: pdev handle |
| 383 | * @tx_desc: tx descriptor |
| 384 | * |
| 385 | * Return: none |
| 386 | */ |
| 387 | static inline |
| 388 | void ol_tx_desc_dup_detect_set(struct ol_txrx_pdev_t *pdev, |
| 389 | struct ol_tx_desc_t *tx_desc) |
| 390 | { |
| 391 | uint16_t msdu_id = ol_tx_desc_id(pdev, tx_desc); |
Houston Hoffman | 088e4b9 | 2016-09-01 13:51:06 -0700 | [diff] [blame] | 392 | bool test; |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 393 | |
| 394 | if (!pdev->tx_desc.free_list_bitmap) |
| 395 | return; |
| 396 | |
Houston Hoffman | 088e4b9 | 2016-09-01 13:51:06 -0700 | [diff] [blame] | 397 | if (qdf_unlikely(msdu_id > pdev->tx_desc.pool_size)) { |
Nirav Shah | 7c8c171 | 2018-09-10 16:01:31 +0530 | [diff] [blame] | 398 | qdf_print("msdu_id %d > pool_size %d", |
| 399 | msdu_id, pdev->tx_desc.pool_size); |
Houston Hoffman | 088e4b9 | 2016-09-01 13:51:06 -0700 | [diff] [blame] | 400 | QDF_BUG(0); |
| 401 | } |
| 402 | |
| 403 | test = test_and_set_bit(msdu_id, pdev->tx_desc.free_list_bitmap); |
| 404 | if (qdf_unlikely(test)) { |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 405 | uint16_t size = (pdev->tx_desc.pool_size >> DIV_BY_8) + |
| 406 | ((pdev->tx_desc.pool_size & MOD_BY_8) ? 1 : 0); |
Nirav Shah | 7c8c171 | 2018-09-10 16:01:31 +0530 | [diff] [blame] | 407 | qdf_print("duplicate msdu_id %d detected!!", msdu_id); |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 408 | qdf_trace_hex_dump(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, |
| 409 | (void *)pdev->tx_desc.free_list_bitmap, size); |
| 410 | QDF_BUG(0); |
| 411 | } |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 412 | } |
| 413 | |
| 414 | /** |
| 415 | * ol_tx_desc_dup_detect_reset() - reset bit for msdu_id |
| 416 | * @pdev: pdev handle |
| 417 | * @tx_desc: tx descriptor |
| 418 | * |
| 419 | * Return: none |
| 420 | */ |
| 421 | static inline |
| 422 | void ol_tx_desc_dup_detect_reset(struct ol_txrx_pdev_t *pdev, |
| 423 | struct ol_tx_desc_t *tx_desc) |
| 424 | { |
| 425 | uint16_t msdu_id = ol_tx_desc_id(pdev, tx_desc); |
Houston Hoffman | 088e4b9 | 2016-09-01 13:51:06 -0700 | [diff] [blame] | 426 | bool test; |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 427 | |
| 428 | if (!pdev->tx_desc.free_list_bitmap) |
| 429 | return; |
| 430 | |
Houston Hoffman | 088e4b9 | 2016-09-01 13:51:06 -0700 | [diff] [blame] | 431 | if (qdf_unlikely(msdu_id > pdev->tx_desc.pool_size)) { |
Nirav Shah | 7c8c171 | 2018-09-10 16:01:31 +0530 | [diff] [blame] | 432 | qdf_print("msdu_id %d > pool_size %d", |
| 433 | msdu_id, pdev->tx_desc.pool_size); |
Houston Hoffman | 088e4b9 | 2016-09-01 13:51:06 -0700 | [diff] [blame] | 434 | QDF_BUG(0); |
| 435 | } |
| 436 | |
| 437 | test = !test_and_clear_bit(msdu_id, pdev->tx_desc.free_list_bitmap); |
| 438 | if (qdf_unlikely(test)) { |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 439 | uint16_t size = (pdev->tx_desc.pool_size >> DIV_BY_8) + |
| 440 | ((pdev->tx_desc.pool_size & MOD_BY_8) ? 1 : 0); |
| 441 | qdf_print("duplicate free msg received for msdu_id %d!!\n", |
| 442 | msdu_id); |
| 443 | qdf_trace_hex_dump(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, |
| 444 | (void *)pdev->tx_desc.free_list_bitmap, size); |
| 445 | QDF_BUG(0); |
| 446 | } |
Nirav Shah | 7629196 | 2016-04-25 10:50:37 +0530 | [diff] [blame] | 447 | } |
| 448 | #else |
/* duplicate-detect debug disabled: no bitmap to allocate */
static inline
void ol_tx_desc_dup_detect_init(struct ol_txrx_pdev_t *pdev, uint16_t size)
{
}
| 453 | |
/* duplicate-detect debug disabled: nothing to free */
static inline
void ol_tx_desc_dup_detect_deinit(struct ol_txrx_pdev_t *pdev)
{
}
| 458 | |
/* duplicate-detect debug disabled: no bitmap bookkeeping */
static inline
void ol_tx_desc_dup_detect_set(struct ol_txrx_pdev_t *pdev,
			       struct ol_tx_desc_t *tx_desc)
{
}
| 464 | |
/* duplicate-detect debug disabled: no bitmap bookkeeping */
static inline
void ol_tx_desc_dup_detect_reset(struct ol_txrx_pdev_t *pdev,
				 struct ol_tx_desc_t *tx_desc)
{
}
| 470 | #endif |
| 471 | |
Nirav Shah | 2e583a0 | 2016-04-30 14:06:12 +0530 | [diff] [blame] | 472 | enum extension_header_type |
| 473 | ol_tx_get_ext_header_type(struct ol_txrx_vdev_t *vdev, |
| 474 | qdf_nbuf_t netbuf); |
| 475 | enum extension_header_type |
| 476 | ol_tx_get_wisa_ext_type(qdf_nbuf_t netbuf); |
| 477 | |
| 478 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 479 | #endif /* _OL_TX_DESC__H_ */ |