/*
 * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <cdf_net_types.h>      /* CDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
#include <cdf_nbuf.h>           /* cdf_nbuf_t, etc. */
#include <cdf_util.h>           /* cdf_assert */
#include <cdf_lock.h>           /* cdf_spinlock */
#ifdef QCA_COMPUTE_TX_DELAY
#include <cdf_time.h>           /* cdf_system_ticks */
#endif

#include <ol_htt_tx_api.h>      /* htt_tx_desc_id */

#include <ol_txrx_types.h>      /* ol_txrx_pdev_t */
#include <ol_tx_desc.h>
#include <ol_txrx_internal.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_txrx.h>

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
extern uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
#endif

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					    struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->pkt_type != 0xff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p",
			   __func__, tx_desc->pkt_type, pdev);
		cdf_assert(0);
	}
	if ((uint32_t *) tx_desc->htt_tx_desc <
	    g_dbg_htt_desc_start_addr
	    || (uint32_t *) tx_desc->htt_tx_desc >
	    g_dbg_htt_desc_end_addr) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s Potential htt_desc corruption:0x%p pdev:0x%p\n",
			   __func__, tx_desc->htt_tx_desc, pdev);
		cdf_assert(0);
	}
}
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->pkt_type = 0xff;
}
#ifdef QCA_COMPUTE_TX_DELAY
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
			   __func__, tx_desc->entry_timestamp_ticks);
		cdf_assert(0);
	}
	tx_desc->entry_timestamp_ticks = cdf_system_ticks();
}
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->entry_timestamp_ticks = 0xffffffff;
}
#else
/*
 * Stubs so a build with QCA_SUPPORT_TXDESC_SANITY_CHECKS but without
 * QCA_COMPUTE_TX_DELAY still compiles (both helpers are called
 * unconditionally below).
 */
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	return;
}
#endif
#else
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					    struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	return;
}
#endif
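
/*
 * Note: the debug helpers above implement a simple sentinel protocol
 * around the descriptor freelist.  ol_tx_desc_free() stamps
 * pkt_type = 0xff and entry_timestamp_ticks = 0xffffffff via the reset
 * helpers, and the next allocation asserts that both sentinels are
 * still intact; a descriptor that comes off the freelist without them
 * was written to while free, or was handed out twice.  A minimal
 * sketch of the intended lifecycle, assuming both
 * QCA_SUPPORT_TXDESC_SANITY_CHECKS and QCA_COMPUTE_TX_DELAY are
 * enabled (illustrative only, not driver code):
 *
 *   ol_tx_desc_free(pdev, desc);         <- sentinels stamped on free
 *   ...
 *   desc = ol_tx_desc_alloc(pdev, vdev); <- sanity checks pass; ticks
 *                                           set to cdf_system_ticks()
 */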

#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_desc_alloc() - allocate descriptor from freelist
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Return: tx descriptor pointer, or NULL in case of error
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	cdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.freelist) {
		tx_desc = ol_tx_get_desc_global_pool(pdev);
		ol_tx_desc_sanity_checks(pdev, tx_desc);
		ol_tx_desc_compute_delay(tx_desc);
	}
	cdf_spin_unlock_bh(&pdev->tx_mutex);
	if (!tx_desc)
		return NULL;

#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
	tx_desc->vdev = vdev;
	cdf_atomic_inc(&vdev->tx_desc_count);
#endif

	return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info handle
 *
 * Return: tx descriptor or NULL
 */
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev);
}
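
/*
 * A minimal usage sketch for the global-pool path above (illustrative
 * only; my_pdev, my_vdev and my_msdu_info are placeholders assumed to
 * be set up by the caller):
 *
 *   struct ol_tx_desc_t *desc;
 *
 *   desc = ol_tx_desc_alloc_wrapper(my_pdev, my_vdev, &my_msdu_info);
 *   if (!desc)
 *       return;                      <- freelist exhausted; drop frame
 *   ...hand the descriptor to the target...
 *   ol_tx_desc_free(my_pdev, desc);  <- completion path returns it
 */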

#else
/**
 * ol_tx_desc_alloc() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @pool: flow pool
 *
 * Return: tx descriptor or NULL
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev,
				      struct ol_tx_flow_pool_t *pool)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	if (pool) {
		cdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->avail_desc) {
			tx_desc = ol_tx_get_desc_flow_pool(pool);
			if (cdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				cdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				pdev->pause_cb(vdev->vdev_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				cdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
			ol_tx_desc_sanity_checks(pdev, tx_desc);
			ol_tx_desc_compute_delay(tx_desc);
		} else {
			cdf_spin_unlock_bh(&pool->flow_pool_lock);
			pdev->pool_stats.pkt_drop_no_desc++;
		}
	} else {
		pdev->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}
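
/*
 * stop_th/start_th form a hysteresis band: the allocator above pauses
 * the netif queues as soon as avail_desc falls below stop_th, and
 * ol_tx_desc_free() (below) only unpauses them once avail_desc has
 * climbed back above start_th.  With illustrative values stop_th = 10
 * and start_th = 15 (the real values come from the flow pool setup),
 * a pool hovering near 10 free descriptors does not toggle the queues
 * on every alloc/free pair.
 */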

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info handle
 *
 * Return: tx descriptor or NULL
 */
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	if (cdf_unlikely(msdu_info->htt.info.frame_type == htt_pkt_type_mgmt))
		return ol_tx_desc_alloc(pdev, vdev, pdev->mgmt_pool);
	else
		return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#else
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#endif
#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_desc_free() - put descriptor to freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	cdf_spin_lock_bh(&pdev->tx_mutex);
#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == ol_tx_frm_tso) {
		if (cdf_unlikely(tx_desc->tso_desc == NULL))
			cdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	ol_tx_put_desc_global_pool(pdev, tx_desc);
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
	if ((cdf_atomic_read(&tx_desc->vdev->os_q_paused)) &&
	    (cdf_atomic_read(&tx_desc->vdev->tx_desc_count) <
	     TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK)) {
		/* wakeup netif_queue */
		cdf_atomic_set(&tx_desc->vdev->os_q_paused, 0);
		ol_txrx_flow_control_cb(tx_desc->vdev, true);
	}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
	cdf_atomic_dec(&tx_desc->vdev->tx_desc_count);
	tx_desc->vdev = NULL;
#endif
	cdf_spin_unlock_bh(&pdev->tx_mutex);
}

#else
/**
 * ol_tx_desc_free() - put descriptor to pool freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	struct ol_tx_flow_pool_t *pool = tx_desc->pool;

#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == ol_tx_frm_tso) {
		if (cdf_unlikely(tx_desc->tso_desc == NULL))
			cdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	cdf_spin_lock_bh(&pool->flow_pool_lock);
	ol_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			pdev->pause_cb(pool->member_flow_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->flow_pool_size) {
			cdf_spin_unlock_bh(&pool->flow_pool_lock);
			ol_tx_free_invalid_flow_pool(pool);
			cdf_print("%s %d pool is INVALID State!!\n",
				  __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		cdf_print("%s %d pool is INACTIVE State!!\n",
			  __func__, __LINE__);
		break;
	}
	cdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#endif

extern void
dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc);

void
dump_pkt(cdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
{
	cdf_print("%s: Pkt: VA 0x%p PA 0x%x len %d\n", __func__,
		  cdf_nbuf_data(nbuf), nbuf_paddr, len);
	print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_NONE, 16, 4,
		       cdf_nbuf_data(nbuf), len, true);
}
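
/*
 * dump_pkt() is intended for debugging fragment setup; a typical
 * (illustrative) call dumps the first 64 bytes of a netbuf alongside
 * its DMA address:
 *
 *   dump_pkt(netbuf, cdf_nbuf_get_frag_paddr_lo(netbuf, 0), 64);
 */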

const uint32_t htt_to_ce_pkt_type[] = {
	[htt_pkt_type_raw] = tx_pkt_type_raw,
	[htt_pkt_type_native_wifi] = tx_pkt_type_native_wifi,
	[htt_pkt_type_ethernet] = tx_pkt_type_802_3,
	[htt_pkt_type_mgmt] = tx_pkt_type_mgmt,
	[htt_pkt_type_eth2] = tx_pkt_type_eth2,
	[htt_pkt_num_types] = 0xffffffff
};
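
/*
 * The designated initializers above make the HTT -> CE packet-type
 * mapping order-independent: any htt_pkt_type_* enumerator indexes
 * directly into the array, with htt_pkt_num_types initialized to an
 * invalid marker.  Illustrative lookup (the caller is assumed to hold
 * a validated frame type):
 *
 *   uint32_t ce_pkt_type;
 *
 *   ce_pkt_type = htt_to_ce_pkt_type[msdu_info->htt.info.frame_type];
 *   cdf_assert(ce_pkt_type != 0xffffffff);
 */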

struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
				   struct ol_txrx_vdev_t *vdev,
				   cdf_nbuf_t netbuf,
				   struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;
	unsigned int i;
	uint32_t num_frags;

	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.action.cksum_offload = cdf_nbuf_get_tx_cksum(netbuf);
	switch (cdf_nbuf_get_exemption_type(netbuf)) {
	case CDF_NBUF_EXEMPT_NO_EXEMPTION:
	case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case CDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		cdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;

	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = ol_tx_frm_tso;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
	} else {
		tx_desc->pkt_type = ol_tx_frm_std;
	}

	/* initialize the HW tx descriptor */

	htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr,
			 ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
			 &msdu_info->tso_info,
			 NULL, vdev->opmode == wlan_op_mode_ocb);

	/*
	 * Initialize the fragmentation descriptor.
	 * Skip the prefix fragment (HTT tx descriptor) that was added
	 * during the call to htt_tx_desc_init above.
	 */
	num_frags = cdf_nbuf_get_num_frags(netbuf);
	/* num_frags is expected to be at most 2 */
	num_frags = (num_frags > CVG_NBUF_MAX_EXTRA_FRAGS)
		    ? CVG_NBUF_MAX_EXTRA_FRAGS
		    : num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */

	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
					  tx_desc->htt_frag_desc,
					  &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
					  msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			cdf_size_t frag_len;
			uint32_t frag_paddr;

			frag_len = cdf_nbuf_get_frag_len(netbuf, i);
			frag_paddr = cdf_nbuf_get_frag_paddr_lo(netbuf, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc, i - 1,
					 frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			cdf_print("%s:%d: htt_fdesc=%p frag_paddr=%u len=%zu\n",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc, i - 1,
					 frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

#if defined(HELIUMPLUS_DEBUG)
	dump_frag_desc("ol_tx_desc_ll()", tx_desc);
#endif
	return tx_desc;
}
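
/*
 * A condensed, illustrative view of how a data-path caller drives
 * ol_tx_desc_ll(); only fields touched in this file are shown, and
 * the drop policy on failure is an assumption about the caller:
 *
 *   msdu_info.tso_info.is_tso = 0;    <- plain (non-TSO) frame
 *   tx_desc = ol_tx_desc_ll(pdev, vdev, netbuf, &msdu_info);
 *   if (!tx_desc)
 *       return netbuf;                <- no descriptor; frame handed back
 */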

void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
				ol_tx_desc_list *tx_descs, int had_error)
{
	struct ol_tx_desc_t *tx_desc, *tmp;
	cdf_nbuf_t msdus = NULL;

	TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
		cdf_nbuf_t msdu = tx_desc->netbuf;

		cdf_atomic_init(&tx_desc->ref_cnt);     /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
		/* restore original hdr offset */
		OL_TX_RESTORE_HDR(tx_desc, msdu);
#endif
		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_TO_DEVICE);
		/* free the tx desc */
		ol_tx_desc_free(pdev, tx_desc);
		/* link the netbuf into a list to free as a batch */
		cdf_nbuf_set_next(msdu, msdus);
		msdus = msdu;
	}
	/* free the netbufs as a batch */
	cdf_nbuf_tx_free(msdus, had_error);
}

void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
				  struct ol_tx_desc_t *tx_desc, int had_error)
{
	int mgmt_type;
	ol_txrx_mgmt_tx_cb ota_ack_cb;
	char *trace_str;

	cdf_atomic_init(&tx_desc->ref_cnt);     /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* restore original hdr offset */
	OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf));
#endif
	trace_str = (had_error) ? "OT:C:F:" : "OT:C:S:";
	cdf_nbuf_trace_update(tx_desc->netbuf, trace_str);
	if (tx_desc->pkt_type == ol_tx_frm_no_free) {
		/* free the tx desc but don't unmap or free the frame */
		if (pdev->tx_data_callback.func) {
			cdf_nbuf_set_next(tx_desc->netbuf, NULL);
			pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt,
						    tx_desc->netbuf, had_error);
			ol_tx_desc_free(pdev, tx_desc);
			return;
		}
		/* let the code below unmap and free the frame */
	}
	cdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, CDF_DMA_TO_DEVICE);
	/* check the frame type to see what kind of special steps are needed */
	if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
	    (tx_desc->pkt_type != 0xff)) {
		uint32_t frag_desc_paddr_lo = 0;

#if defined(HELIUMPLUS_PADDR64)
		frag_desc_paddr_lo = tx_desc->htt_frag_desc_paddr;
		/* FIX THIS -
		 * The FW currently has trouble using the host's fragments
		 * table for management frames. Until this is fixed,
		 * rather than specifying the fragment table to the FW,
		 * the host SW will specify just the address of the initial
		 * fragment.
		 * Now that the mgmt frame is done, the HTT tx desc's frags
		 * table pointer needs to be reset.
		 */
#if defined(HELIUMPLUS_DEBUG)
		cdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n",
			  __func__, __LINE__, tx_desc->id,
			  frag_desc_paddr_lo);
#endif /* HELIUMPLUS_DEBUG */
#endif /* HELIUMPLUS_PADDR64 */
		htt_tx_desc_frags_table_set(pdev->htt_pdev,
					    tx_desc->htt_tx_desc, 0,
					    frag_desc_paddr_lo, 1);

		mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
		/*
		 * we already checked the value when the mgmt frame was
		 * provided to the txrx layer.
		 * no need to check it a 2nd time.
		 */
		ota_ack_cb = pdev->tx_mgmt.callbacks[mgmt_type].ota_ack_cb;
		if (ota_ack_cb) {
			void *ctxt;
			ctxt = pdev->tx_mgmt.callbacks[mgmt_type].ctxt;
			ota_ack_cb(ctxt, tx_desc->netbuf, had_error);
		}
		/* free the netbuf */
		cdf_nbuf_free(tx_desc->netbuf);
	} else {
		/* single regular frame */
		cdf_nbuf_set_next(tx_desc->netbuf, NULL);
		cdf_nbuf_tx_free(tx_desc->netbuf, had_error);
	}
	/* free the tx desc */
	ol_tx_desc_free(pdev, tx_desc);
}

#if defined(FEATURE_TSO)
/**
 * ol_tso_alloc_segment() - function to allocate a TSO segment
 * element
 * @pdev: HTT pdev
 *
 * Allocates a TSO segment element from the free list held in
 * the HTT pdev
 *
 * Return: pointer to the allocated TSO segment element, or NULL
 * if the free list is empty
 */
struct cdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
{
	struct cdf_tso_seg_elem_t *tso_seg = NULL;

	cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	if (pdev->tso_seg_pool.freelist) {
		pdev->tso_seg_pool.num_free--;
		tso_seg = pdev->tso_seg_pool.freelist;
		pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
	}
	cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);

	return tso_seg;
}
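
/*
 * Illustrative pairing of the TSO segment pool helpers (error handling
 * beyond the NULL check is elided):
 *
 *   struct cdf_tso_seg_elem_t *seg = ol_tso_alloc_segment(pdev);
 *
 *   if (!seg)
 *       return 1;                    <- pool empty; caller must recover
 *   ...segment stays attached to the tx descriptor while in flight...
 *   ol_tso_free_segment(pdev, seg);  <- returned on tx completion
 */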

/**
 * ol_tso_free_segment() - function to free a TSO segment
 * element
 * @pdev: HTT pdev
 * @tso_seg: The TSO segment element to be freed
 *
 * Returns a TSO segment element to the free list held in the
 * HTT pdev
 *
 * Return: none
 */
void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
			 struct cdf_tso_seg_elem_t *tso_seg)
{
	cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	tso_seg->next = pdev->tso_seg_pool.freelist;
	pdev->tso_seg_pool.freelist = tso_seg;
	pdev->tso_seg_pool.num_free++;
	cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
}
#endif