Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
Dhanashri Atre | 83d373d | 2015-07-28 16:45:59 -0700 | [diff] [blame] | 2 | * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3 | * |
| 4 | * Previously licensed under the ISC license by Qualcomm Atheros, Inc. |
| 5 | * |
| 6 | * |
| 7 | * Permission to use, copy, modify, and/or distribute this software for |
| 8 | * any purpose with or without fee is hereby granted, provided that the |
| 9 | * above copyright notice and this permission notice appear in all |
| 10 | * copies. |
| 11 | * |
| 12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 13 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 15 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 16 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 17 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 18 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 19 | * PERFORMANCE OF THIS SOFTWARE. |
| 20 | */ |
| 21 | |
| 22 | /* |
| 23 | * This file was originally distributed by Qualcomm Atheros, Inc. |
| 24 | * under proprietary terms before Copyright ownership was assigned |
| 25 | * to the Linux Foundation. |
| 26 | */ |
| 27 | |
Anurag Chouhan | c73697b | 2016-02-21 15:05:43 +0530 | [diff] [blame] | 28 | #include <qdf_net_types.h> /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 29 | #include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */ |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 30 | #include <qdf_util.h> /* qdf_assert */ |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 31 | #include <qdf_lock.h> /* qdf_spinlock */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 32 | #ifdef QCA_COMPUTE_TX_DELAY |
Anurag Chouhan | 50220ce | 2016-02-18 20:11:33 +0530 | [diff] [blame] | 33 | #include <qdf_time.h> /* qdf_system_ticks */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 34 | #endif |
| 35 | |
| 36 | #include <ol_htt_tx_api.h> /* htt_tx_desc_id */ |
| 37 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 38 | #include <ol_tx_desc.h> |
| 39 | #include <ol_txrx_internal.h> |
| 40 | #ifdef QCA_SUPPORT_SW_TXRX_ENCAP |
| 41 | #include <ol_txrx_encap.h> /* OL_TX_RESTORE_HDR, etc */ |
| 42 | #endif |
| 43 | #include <ol_txrx.h> |
| 44 | |
| 45 | #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS |
| 46 | extern uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr; |
| 47 | #endif |
| 48 | |
| 49 | #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS |
| 50 | static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev, |
| 51 | struct ol_tx_desc_t *tx_desc) |
| 52 | { |
| 53 | if (tx_desc->pkt_type != 0xff) { |
| 54 | TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, |
| 55 | "%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p", |
| 56 | __func__, tx_desc->pkt_type, pdev); |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 57 | qdf_assert(0); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 58 | } |
| 59 | if ((uint32_t *) tx_desc->htt_tx_desc < |
| 60 | g_dbg_htt_desc_start_addr |
| 61 | || (uint32_t *) tx_desc->htt_tx_desc > |
| 62 | g_dbg_htt_desc_end_addr) { |
| 63 | TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, |
| 64 | "%s Potential htt_desc curruption:0x%p pdev:0x%p\n", |
| 65 | __func__, tx_desc->htt_tx_desc, pdev); |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 66 | qdf_assert(0); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 67 | } |
| 68 | } |
/**
 * ol_tx_desc_reset_pkt_type() - stamp a freed descriptor's pkt_type
 * @tx_desc: descriptor being returned to the free list
 *
 * 0xff is the "on the free list" sentinel that
 * ol_tx_desc_sanity_checks() verifies on the next allocation.
 */
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->pkt_type = 0xff;
}
#ifdef QCA_COMPUTE_TX_DELAY
/**
 * ol_tx_desc_compute_delay() - record descriptor allocation time
 * @tx_desc: descriptor just allocated
 *
 * entry_timestamp_ticks must still hold the 0xffffffff sentinel written
 * by ol_tx_desc_reset_timestamp() when the descriptor was freed; any
 * other value indicates the descriptor was handed out twice, so assert.
 * Otherwise stamp the current system tick count for tx-delay accounting.
 */
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
			   __func__, tx_desc->entry_timestamp_ticks);
		qdf_assert(0);
	}
	tx_desc->entry_timestamp_ticks = qdf_system_ticks();
}

/**
 * ol_tx_desc_reset_timestamp() - mark a freed descriptor's timestamp
 * @tx_desc: descriptor being returned to the free list
 *
 * 0xffffffff is the "on the free list" sentinel checked by
 * ol_tx_desc_compute_delay() at the next allocation.
 */
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->entry_timestamp_ticks = 0xffffffff;
}
#else
/*
 * Bug fix: with QCA_SUPPORT_TXDESC_SANITY_CHECKS defined but
 * QCA_COMPUTE_TX_DELAY undefined, no definitions of these two helpers
 * existed although the alloc/free paths call them unconditionally,
 * breaking the build for that configuration.  Provide empty stubs.
 */
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
}

static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
}
#endif
| 88 | #else |
/* No-op stub: descriptor sanity checking is compiled out. */
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					    struct ol_tx_desc_t *tx_desc)
{
}
/* No-op stub: no pkt_type sentinel is maintained in this build. */
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
}
/* No-op stub: tx-delay accounting is compiled out. */
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
}
/* No-op stub: no timestamp sentinel is maintained in this build. */
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
}
| 106 | #endif |
| 107 | |
Manjunathappa Prakash | af88fc7 | 2016-11-02 17:26:22 -0700 | [diff] [blame] | 108 | #ifdef CONFIG_HL_SUPPORT |
| 109 | |
/**
 * ol_tx_desc_vdev_update() - record the owning vdev in the descriptor
 * @tx_desc: tx descriptor pointer
 * @vdev: vdev handle
 *
 * HL (host latency) builds track which vdev each outstanding descriptor
 * belongs to; ol_tx_desc_vdev_rm() clears this link at free time.
 *
 * Return: None
 */
static inline void
ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
		       struct ol_txrx_vdev_t *vdev)
{
	tx_desc->vdev = vdev;
}
Manjunathappa Prakash | af88fc7 | 2016-11-02 17:26:22 -0700 | [diff] [blame] | 123 | #else |
| 124 | |
/* No-op stub: LL builds do not track the owning vdev per descriptor. */
static inline void
ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
		       struct ol_txrx_vdev_t *vdev)
{
}
| 131 | #endif |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 132 | |
| 133 | #ifdef CONFIG_PER_VDEV_TX_DESC_POOL |
| 134 | |
/**
 * ol_tx_desc_count_inc() - tx desc count increment for desc allocation.
 * @vdev: vdev handle
 *
 * With CONFIG_PER_VDEV_TX_DESC_POOL, each vdev tracks how many
 * descriptors it currently holds; the matching decrement is done by
 * ol_tx_desc_vdev_rm() at free time.
 *
 * Return: None
 */
static inline void
ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
{
	qdf_atomic_inc(&vdev->tx_desc_count);
}
| 146 | #else |
| 147 | |
/* No-op stub: per-vdev descriptor accounting is compiled out. */
static inline void
ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
{
}
| 153 | |
| 154 | #endif |
| 155 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 156 | #ifndef QCA_LL_TX_FLOW_CONTROL_V2 |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 157 | |
/**
 * ol_tx_desc_alloc() - allocate descriptor from freelist
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Pops one descriptor off the global free list under pdev->tx_mutex.
 * Duplicate-detect marking, sanity checks and the tx-delay timestamp
 * are all done while the lock is held; the vdev bookkeeping and the
 * reference count increment happen after the lock is dropped since
 * they only touch the (now privately owned) descriptor and atomics.
 *
 * Return: tx descriptor pointer/ NULL in case of error
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.freelist) {
		tx_desc = ol_tx_get_desc_global_pool(pdev);
		ol_tx_desc_dup_detect_set(pdev, tx_desc);
		ol_tx_desc_sanity_checks(pdev, tx_desc);
		ol_tx_desc_compute_delay(tx_desc);
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	/* free list was empty */
	if (!tx_desc)
		return NULL;

	ol_tx_desc_vdev_update(tx_desc, vdev);
	ol_tx_desc_count_inc(vdev);
	qdf_atomic_inc(&tx_desc->ref_cnt);

	return tx_desc;
}
| 189 | |
/**
 * ol_tx_desc_alloc_wrapper() -allocate tx descriptor
 * @pdev: pdev handler
 * @vdev: vdev handler
 * @msdu_info: msdu handler (unused here; only the flow-control-V2
 *             variant selects a pool based on the frame type)
 *
 * Return: tx descriptor or NULL
 */
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev);
}
| 205 | |
| 206 | #else |
/**
 * ol_tx_desc_alloc() -allocate tx descriptor
 * @pdev: pdev handler
 * @vdev: vdev handler
 * @pool: flow pool to draw the descriptor from
 *
 * Flow-control-V2 variant.  Takes a descriptor from @pool under the
 * pool lock.  If the allocation drops the pool below its stop
 * threshold, the pool is marked paused and the pause callback stops
 * this vdev's netif queues — note the lock is released *before*
 * invoking the callback.  Empty-pool and missing-pool cases are
 * counted in drop statistics rather than asserted.
 *
 * Return: tx descriptor or NULL
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev,
				      struct ol_tx_flow_pool_t *pool)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->avail_desc) {
			tx_desc = ol_tx_get_desc_flow_pool(pool);
			ol_tx_desc_dup_detect_set(pdev, tx_desc);
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				pdev->pause_cb(vdev->vdev_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
			/* descriptor is now privately owned: safe to
			 * touch outside the pool lock */
			ol_tx_desc_sanity_checks(pdev, tx_desc);
			ol_tx_desc_compute_delay(tx_desc);
			qdf_atomic_inc(&tx_desc->ref_cnt);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		pdev->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}
| 250 | |
| 251 | /** |
| 252 | * ol_tx_desc_alloc_wrapper() -allocate tx descriptor |
| 253 | * @pdev: pdev handler |
| 254 | * @vdev: vdev handler |
| 255 | * @msdu_info: msdu handler |
| 256 | * |
| 257 | * Return: tx descriptor or NULL |
| 258 | */ |
| 259 | #ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL |
| 260 | struct ol_tx_desc_t * |
| 261 | ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev, |
| 262 | struct ol_txrx_vdev_t *vdev, |
| 263 | struct ol_txrx_msdu_info_t *msdu_info) |
| 264 | { |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 265 | if (qdf_unlikely(msdu_info->htt.info.frame_type == htt_pkt_type_mgmt)) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 266 | return ol_tx_desc_alloc(pdev, vdev, pdev->mgmt_pool); |
| 267 | else |
| 268 | return ol_tx_desc_alloc(pdev, vdev, vdev->pool); |
| 269 | } |
| 270 | #else |
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	/* no global mgmt pool in this build: all frames use the
	 * per-vdev flow pool */
	return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
| 278 | #endif |
| 279 | #endif |
| 280 | |
/**
 * ol_tx_desc_alloc_hl() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: tx msdu info
 *
 * HL wrapper around ol_tx_desc_alloc_wrapper() that additionally
 * charges the allocation against the pdev tx-queue resource count
 * used by the HL tx scheduler.
 *
 * Return: tx descriptor pointer/ NULL in case of error
 */
static struct ol_tx_desc_t *
ol_tx_desc_alloc_hl(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* one fewer descriptor available to the HL tx scheduler */
	qdf_atomic_dec(&pdev->tx_queue.rsrc_cnt);

	return tx_desc;
}
| 304 | |
| 305 | #if defined(CONFIG_PER_VDEV_TX_DESC_POOL) && defined(CONFIG_HL_SUPPORT) |
| 306 | |
/**
 * ol_tx_desc_vdev_rm() - decrement the tx desc count for vdev.
 * @tx_desc: tx desc
 *
 * Undoes ol_tx_desc_vdev_update()/ol_tx_desc_count_inc(): drops the
 * owning vdev's outstanding-descriptor count and severs the
 * descriptor's back-pointer to it.
 *
 * Return: None
 */
static inline void
ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
{
	qdf_atomic_dec(&tx_desc->vdev->tx_desc_count);
	tx_desc->vdev = NULL;
}
| 319 | #else |
| 320 | |
/* No-op stub: per-vdev descriptor accounting is compiled out. */
static inline void
ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
{
}
| 326 | #endif |
| 327 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 328 | #ifndef QCA_LL_TX_FLOW_CONTROL_V2 |
/**
 * ol_tx_desc_free() - put descriptor to freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Legacy (non-flow-control-V2) free path.  The entire operation —
 * TSO segment release, duplicate-detect clear, sentinel resets, and
 * the push back onto the global free list — runs under pdev->tx_mutex,
 * mirroring the locking in ol_tx_desc_alloc().
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);

	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
		/* a TSO descriptor must carry a TSO segment to release */
		if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
			qdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
			qdf_assert(0);
		} else {
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
		}
	}
	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
	/* write the free-list sentinels checked at the next allocation */
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	ol_tx_put_desc_global_pool(pdev, tx_desc);
	ol_tx_desc_vdev_rm(tx_desc);

	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
| 358 | |
| 359 | #else |
/**
 * ol_tx_desc_free() - put descriptor to pool freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Flow-control-V2 free path.  Returns the descriptor to its owning
 * flow pool, then reacts to the pool state:
 *  - ACTIVE_PAUSED: wake the netif queues once availability climbs
 *    back above the start threshold;
 *  - INVALID: once every descriptor is back, tear the pool down via
 *    ol_tx_free_invalid_flow_pool() (lock dropped first — note the
 *    early return without re-unlocking);
 *  - ACTIVE_UNPAUSED: nothing to do.
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	struct ol_tx_flow_pool_t *pool = tx_desc->pool;

#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
		if (qdf_unlikely(tx_desc->tso_desc == NULL))
			qdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	/* write the free-list sentinels checked at the next allocation */
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
	ol_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			pdev->pause_cb(pool->member_flow_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->flow_pool_size) {
			/* all descriptors are home: safe to destroy the
			 * pool, but only after releasing its lock */
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			ol_tx_free_invalid_flow_pool(pool);
			qdf_print("%s %d pool is INVALID State!!\n",
				  __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!\n",
			  __func__, __LINE__);
		break;
	};
	qdf_spin_unlock_bh(&pool->flow_pool_lock);

}
| 414 | #endif |
| 415 | |
/**
 * dump_pkt() - log a packet's addresses and a hex dump of its payload
 * @nbuf: network buffer holding the packet
 * @nbuf_paddr: DMA (physical) address of the buffer
 * @len: number of bytes to hex-dump
 *
 * Debug-only helper; output goes to the kernel log at KERN_DEBUG.
 */
void
dump_pkt(qdf_nbuf_t nbuf, qdf_dma_addr_t nbuf_paddr, int len)
{
	qdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
		  qdf_nbuf_data(nbuf), (long long unsigned int)nbuf_paddr, len);
	print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       qdf_nbuf_data(nbuf), len, true);
}
| 424 | |
/*
 * Translation table from HTT packet types to copy-engine tx packet
 * types, indexed by enum htt_pkt_type.  The htt_pkt_num_types slot is
 * a 0xffffffff sentinel marking an out-of-range/invalid type.
 */
const uint32_t htt_to_ce_pkt_type[] = {
	[htt_pkt_type_raw] = tx_pkt_type_raw,
	[htt_pkt_type_native_wifi] = tx_pkt_type_native_wifi,
	[htt_pkt_type_ethernet] = tx_pkt_type_802_3,
	[htt_pkt_type_mgmt] = tx_pkt_type_mgmt,
	[htt_pkt_type_eth2] = tx_pkt_type_eth2,
	[htt_pkt_num_types] = 0xffffffff
};
| 433 | |
Nirav Shah | 2e583a0 | 2016-04-30 14:06:12 +0530 | [diff] [blame] | 434 | #define WISA_DEST_PORT_6MBPS 50000 |
| 435 | #define WISA_DEST_PORT_24MBPS 50001 |
| 436 | |
| 437 | /** |
| 438 | * ol_tx_get_wisa_ext_hdr_type() - get header type for WiSA mode |
| 439 | * @netbuf: network buffer |
| 440 | * |
| 441 | * Return: extension header type |
| 442 | */ |
| 443 | enum extension_header_type |
| 444 | ol_tx_get_wisa_ext_hdr_type(qdf_nbuf_t netbuf) |
| 445 | { |
| 446 | uint8_t *buf = qdf_nbuf_data(netbuf); |
| 447 | uint16_t dport; |
| 448 | |
| 449 | if (qdf_is_macaddr_group( |
| 450 | (struct qdf_mac_addr *)(buf + QDF_NBUF_DEST_MAC_OFFSET))) { |
| 451 | |
| 452 | dport = (uint16_t)(*(uint16_t *)(buf + |
| 453 | QDF_NBUF_TRAC_IPV4_OFFSET + |
| 454 | QDF_NBUF_TRAC_IPV4_HEADER_SIZE + sizeof(uint16_t))); |
| 455 | |
| 456 | if (dport == QDF_SWAP_U16(WISA_DEST_PORT_6MBPS)) |
| 457 | return WISA_MODE_EXT_HEADER_6MBPS; |
| 458 | else if (dport == QDF_SWAP_U16(WISA_DEST_PORT_24MBPS)) |
| 459 | return WISA_MODE_EXT_HEADER_24MBPS; |
| 460 | else |
| 461 | return EXT_HEADER_NOT_PRESENT; |
| 462 | } else { |
| 463 | return EXT_HEADER_NOT_PRESENT; |
| 464 | } |
| 465 | } |
| 466 | |
| 467 | /** |
| 468 | * ol_tx_get_ext_header_type() - extension header is required or not |
| 469 | * @vdev: vdev pointer |
| 470 | * @netbuf: network buffer |
| 471 | * |
| 472 | * This function returns header type and if extension header is |
| 473 | * not required than returns EXT_HEADER_NOT_PRESENT. |
| 474 | * |
| 475 | * Return: extension header type |
| 476 | */ |
| 477 | enum extension_header_type |
| 478 | ol_tx_get_ext_header_type(struct ol_txrx_vdev_t *vdev, |
| 479 | qdf_nbuf_t netbuf) |
| 480 | { |
| 481 | if (vdev->is_wisa_mode_enable == true) |
| 482 | return ol_tx_get_wisa_ext_hdr_type(netbuf); |
| 483 | else |
| 484 | return EXT_HEADER_NOT_PRESENT; |
| 485 | } |
| 486 | |
/**
 * ol_tx_desc_ll() - allocate and initialize a tx descriptor (LL path)
 * @pdev: pdev handle
 * @vdev: vdev the frame is transmitted on
 * @netbuf: the frame to transmit
 * @msdu_info: per-MSDU metadata; the htt fields (vdev_id, checksum
 *             offload, encryption action) are filled in here
 *
 * Low-latency tx descriptor setup: classifies the frame's encryption
 * exemption, allocates a SW descriptor, initializes the HTT HW
 * descriptor, and programs the fragmentation descriptor (either the
 * TSO segment info or the netbuf's extra fragments).
 *
 * Return: initialized tx descriptor, or NULL if allocation failed
 */
struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
				   struct ol_txrx_vdev_t *vdev,
				   qdf_nbuf_t netbuf,
				   struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;
	unsigned int i;
	uint32_t num_frags;
	enum extension_header_type type;

	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
	switch (qdf_nbuf_get_exemption_type(netbuf)) {
	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case QDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		qdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;

	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = OL_TX_FRM_TSO;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
	} else {
		tx_desc->pkt_type = OL_TX_FRM_STD;
	}

	/* WiSA extension header selection (EXT_HEADER_NOT_PRESENT if
	 * WiSA mode is off or the frame doesn't qualify) */
	type = ol_tx_get_ext_header_type(vdev, netbuf);

	/* initialize the HW tx descriptor */
	htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr,
			 ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
			 &msdu_info->tso_info, NULL, type);

	/*
	 * Initialize the fragmentation descriptor.
	 * Skip the prefix fragment (HTT tx descriptor) that was added
	 * during the call to htt_tx_desc_init above.
	 */
	num_frags = qdf_nbuf_get_num_frags(netbuf);
	/* num_frags are expected to be 2 max */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUSPADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */

	if (msdu_info->tso_info.is_tso) {
		/* TSO: HW fragment info comes from the TSO segment */
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			 msdu_info->tso_info.curr_seg->seg);
	} else {
		/* non-TSO: program each extra fragment (from index 1;
		 * index 0 is the HTT/HTC prefix fragment) */
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;
#ifdef HELIUMPLUS_DEBUG
			void *frag_vaddr;
			frag_vaddr = qdf_nbuf_get_frag_vaddr(netbuf, i);
#endif
			frag_len = qdf_nbuf_get_frag_len(netbuf, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(netbuf, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc, i - 1,
				 frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_vaddr=0x%p frag_paddr=0x%llx len=%zu\n",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i-1, frag_vaddr, frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUSPADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc, i - 1,
					 frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

#if defined(HELIUMPLUS_DEBUG)
	ol_txrx_dump_frag_desc("ol_tx_desc_ll()", tx_desc);
#endif
	return tx_desc;
}
| 597 | |
/**
 * ol_tx_desc_hl() - allocate and initialize a tx descriptor (HL path)
 * @pdev: pdev handle
 * @vdev: vdev the frame is transmitted on
 * @netbuf: the frame to transmit
 * @msdu_info: per-MSDU metadata; the htt fields (vdev_id, frame type,
 *             checksum offload, encryption action) are filled in here
 *
 * High-latency counterpart of ol_tx_desc_ll(): only the SW descriptor
 * is set up here — the HW (HTT) descriptor is initialized later by the
 * caller, after tx classification/scheduling.
 *
 * Return: initialized tx descriptor, or NULL if allocation failed
 */
struct ol_tx_desc_t *
ol_tx_desc_hl(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t netbuf,
	struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	/* FIX THIS: these inits should probably be done by tx classify */
	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
	switch (qdf_nbuf_get_exemption_type(netbuf)) {
	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case QDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		qdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_hl(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;
	/* fix this - get pkt_type from msdu_info */
	tx_desc->pkt_type = OL_TX_FRM_STD;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	tx_desc->orig_l2_hdr_bytes = 0;
#endif
	/* the HW tx descriptor will be initialized later by the caller */

	return tx_desc;
}
| 643 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 644 | void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev, |
| 645 | ol_tx_desc_list *tx_descs, int had_error) |
| 646 | { |
| 647 | struct ol_tx_desc_t *tx_desc, *tmp; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 648 | qdf_nbuf_t msdus = NULL; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 649 | |
| 650 | TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 651 | qdf_nbuf_t msdu = tx_desc->netbuf; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 652 | |
Anurag Chouhan | 8e0ccd3 | 2016-02-19 15:30:20 +0530 | [diff] [blame] | 653 | qdf_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 654 | #ifdef QCA_SUPPORT_SW_TXRX_ENCAP |
| 655 | /* restore original hdr offset */ |
| 656 | OL_TX_RESTORE_HDR(tx_desc, msdu); |
| 657 | #endif |
Mohit Khanna | 38d0e93 | 2016-08-31 19:49:22 -0700 | [diff] [blame] | 658 | if (qdf_nbuf_get_users(msdu) <= 1) |
| 659 | qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_TO_DEVICE); |
| 660 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 661 | /* free the tx desc */ |
| 662 | ol_tx_desc_free(pdev, tx_desc); |
| 663 | /* link the netbuf into a list to free as a batch */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 664 | qdf_nbuf_set_next(msdu, msdus); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 665 | msdus = msdu; |
| 666 | } |
| 667 | /* free the netbufs as a batch */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 668 | qdf_nbuf_tx_free(msdus, had_error); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 669 | } |
| 670 | |
| 671 | void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev, |
| 672 | struct ol_tx_desc_t *tx_desc, int had_error) |
| 673 | { |
| 674 | int mgmt_type; |
| 675 | ol_txrx_mgmt_tx_cb ota_ack_cb; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 676 | |
Anurag Chouhan | 8e0ccd3 | 2016-02-19 15:30:20 +0530 | [diff] [blame] | 677 | qdf_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 678 | #ifdef QCA_SUPPORT_SW_TXRX_ENCAP |
| 679 | /* restore original hdr offset */ |
| 680 | OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf)); |
| 681 | #endif |
Prakash Manjunathappa | 6dc1a96 | 2016-05-05 19:32:53 -0700 | [diff] [blame] | 682 | if (tx_desc->pkt_type == OL_TX_FRM_NO_FREE) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 683 | /* free the tx desc but don't unmap or free the frame */ |
| 684 | if (pdev->tx_data_callback.func) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 685 | qdf_nbuf_set_next(tx_desc->netbuf, NULL); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 686 | pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt, |
| 687 | tx_desc->netbuf, had_error); |
| 688 | ol_tx_desc_free(pdev, tx_desc); |
| 689 | return; |
| 690 | } |
| 691 | /* let the code below unmap and free the frame */ |
| 692 | } |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 693 | qdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, QDF_DMA_TO_DEVICE); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 694 | /* check the frame type to see what kind of special steps are needed */ |
| 695 | if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) && |
| 696 | (tx_desc->pkt_type != 0xff)) { |
Anurag Chouhan | df2b268 | 2016-02-29 14:15:27 +0530 | [diff] [blame] | 697 | qdf_dma_addr_t frag_desc_paddr = 0; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 698 | |
| 699 | #if defined(HELIUMPLUS_PADDR64) |
Houston Hoffman | 43d47fa | 2016-02-24 16:34:30 -0800 | [diff] [blame] | 700 | frag_desc_paddr = tx_desc->htt_frag_desc_paddr; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 701 | /* FIX THIS - |
| 702 | * The FW currently has trouble using the host's fragments |
| 703 | * table for management frames. Until this is fixed, |
| 704 | * rather than specifying the fragment table to the FW, |
| 705 | * the host SW will specify just the address of the initial |
| 706 | * fragment. |
| 707 | * Now that the mgmt frame is done, the HTT tx desc's frags |
| 708 | * table pointer needs to be reset. |
| 709 | */ |
| 710 | #if defined(HELIUMPLUS_DEBUG) |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 711 | qdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n", |
Leo Chang | 376398b | 2015-10-23 14:19:02 -0700 | [diff] [blame] | 712 | __func__, __LINE__, tx_desc->id, |
Houston Hoffman | 43d47fa | 2016-02-24 16:34:30 -0800 | [diff] [blame] | 713 | frag_desc_paddr); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 714 | #endif /* HELIUMPLUS_DEBUG */ |
| 715 | #endif /* HELIUMPLUS_PADDR64 */ |
| 716 | htt_tx_desc_frags_table_set(pdev->htt_pdev, |
| 717 | tx_desc->htt_tx_desc, 0, |
Houston Hoffman | 43d47fa | 2016-02-24 16:34:30 -0800 | [diff] [blame] | 718 | frag_desc_paddr, 1); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 719 | |
| 720 | mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE; |
| 721 | /* |
| 722 | * we already checked the value when the mgmt frame was |
| 723 | * provided to the txrx layer. |
| 724 | * no need to check it a 2nd time. |
| 725 | */ |
| 726 | ota_ack_cb = pdev->tx_mgmt.callbacks[mgmt_type].ota_ack_cb; |
| 727 | if (ota_ack_cb) { |
| 728 | void *ctxt; |
| 729 | ctxt = pdev->tx_mgmt.callbacks[mgmt_type].ctxt; |
| 730 | ota_ack_cb(ctxt, tx_desc->netbuf, had_error); |
| 731 | } |
| 732 | /* free the netbuf */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 733 | qdf_nbuf_free(tx_desc->netbuf); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 734 | } else { |
| 735 | /* single regular frame */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 736 | qdf_nbuf_set_next(tx_desc->netbuf, NULL); |
| 737 | qdf_nbuf_tx_free(tx_desc->netbuf, had_error); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 738 | } |
| 739 | /* free the tx desc */ |
| 740 | ol_tx_desc_free(pdev, tx_desc); |
| 741 | } |
| 742 | |
| 743 | #if defined(FEATURE_TSO) |
| 744 | /** |
| 745 | * htt_tso_alloc_segment() - function to allocate a TSO segment |
| 746 | * element |
| 747 | * @pdev: HTT pdev |
| 748 | * @tso_seg: This is the output. The TSO segment element. |
| 749 | * |
| 750 | * Allocates a TSO segment element from the free list held in |
| 751 | * the HTT pdev |
| 752 | * |
| 753 | * Return: none |
| 754 | */ |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 755 | struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 756 | { |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 757 | struct qdf_tso_seg_elem_t *tso_seg = NULL; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 758 | |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 759 | qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 760 | if (pdev->tso_seg_pool.freelist) { |
| 761 | pdev->tso_seg_pool.num_free--; |
| 762 | tso_seg = pdev->tso_seg_pool.freelist; |
| 763 | pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next; |
| 764 | } |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 765 | qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 766 | |
| 767 | return tso_seg; |
| 768 | } |
| 769 | |
| 770 | /** |
| 771 | * ol_tso_free_segment() - function to free a TSO segment |
| 772 | * element |
| 773 | * @pdev: HTT pdev |
| 774 | * @tso_seg: The TSO segment element to be freed |
| 775 | * |
| 776 | * Returns a TSO segment element to the free list held in the |
| 777 | * HTT pdev |
| 778 | * |
| 779 | * Return: none |
| 780 | */ |
| 781 | |
| 782 | void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev, |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 783 | struct qdf_tso_seg_elem_t *tso_seg) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 784 | { |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 785 | qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 786 | tso_seg->next = pdev->tso_seg_pool.freelist; |
| 787 | pdev->tso_seg_pool.freelist = tso_seg; |
| 788 | pdev->tso_seg_pool.num_free++; |
Anurag Chouhan | a37b5b7 | 2016-02-21 14:53:42 +0530 | [diff] [blame] | 789 | qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 790 | } |
| 791 | #endif |