/*
 * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <cdf_net_types.h>      /* CDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
#include <cdf_nbuf.h>           /* cdf_nbuf_t, etc. */
#include <cdf_util.h>           /* cdf_assert */
#include <cdf_lock.h>           /* cdf_spinlock */
#ifdef QCA_COMPUTE_TX_DELAY
#include <cdf_time.h>           /* cdf_system_ticks */
#endif

#include <ol_htt_tx_api.h>      /* htt_tx_desc_id */

#include <ol_txrx_types.h>      /* ol_txrx_pdev_t */
#include <ol_tx_desc.h>
#include <ol_txrx_internal.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_txrx.h>

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
extern uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
#endif

#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->pkt_type != 0xff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p",
			   __func__, tx_desc->pkt_type, pdev);
		cdf_assert(0);
	}
	if ((uint32_t *) tx_desc->htt_tx_desc < g_dbg_htt_desc_start_addr
	    || (uint32_t *) tx_desc->htt_tx_desc > g_dbg_htt_desc_end_addr) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s Potential htt_desc corruption:0x%p pdev:0x%p\n",
			   __func__, tx_desc->htt_tx_desc, pdev);
		cdf_assert(0);
	}
}
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->pkt_type = 0xff;
}
#ifdef QCA_COMPUTE_TX_DELAY
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
			   __func__, tx_desc->entry_timestamp_ticks);
		cdf_assert(0);
	}
	tx_desc->entry_timestamp_ticks = cdf_system_ticks();
}
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->entry_timestamp_ticks = 0xffffffff;
}
#endif
#else
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	return;
}
#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_desc_alloc() - allocate descriptor from freelist
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Return: tx descriptor pointer, or NULL in case of error
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	cdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.freelist) {
		tx_desc = ol_tx_get_desc_global_pool(pdev);
		ol_tx_desc_sanity_checks(pdev, tx_desc);
		ol_tx_desc_compute_delay(tx_desc);
	}
	cdf_spin_unlock_bh(&pdev->tx_mutex);
	return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info
 *
 * Return: tx descriptor or NULL
 */
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev);
}

#else
/**
 * ol_tx_desc_alloc() - allocate tx descriptor from a flow pool
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @pool: flow pool
 *
 * Return: tx descriptor or NULL
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev,
				      struct ol_tx_flow_pool_t *pool)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	if (pool) {
		cdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->avail_desc) {
			tx_desc = ol_tx_get_desc_flow_pool(pool);
			if (cdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				cdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				pdev->pause_cb(vdev->vdev_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				cdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
			ol_tx_desc_sanity_checks(pdev, tx_desc);
			ol_tx_desc_compute_delay(tx_desc);
		} else {
			cdf_spin_unlock_bh(&pool->flow_pool_lock);
			pdev->pool_stats.pkt_drop_no_desc++;
		}
	} else {
		pdev->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * ol_tx_desc_alloc_wrapper() - allocate tx descriptor
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info
 *
 * Return: tx descriptor or NULL
 */
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	if (cdf_unlikely(msdu_info->htt.info.frame_type == htt_pkt_type_mgmt))
		return ol_tx_desc_alloc(pdev, vdev, pdev->mgmt_pool);
	else
		return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#else
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#endif
#endif

#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_desc_free() - put descriptor to freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	cdf_spin_lock_bh(&pdev->tx_mutex);

	if (tx_desc->pkt_type == ol_tx_frm_tso) {
		if (cdf_unlikely(tx_desc->tso_desc == NULL)) {
			cdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
			cdf_assert(0);
		} else {
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
		}
	}
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	ol_tx_put_desc_global_pool(pdev, tx_desc);
	cdf_spin_unlock_bh(&pdev->tx_mutex);
}

#else
/**
 * ol_tx_desc_free() - put descriptor to pool freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	struct ol_tx_flow_pool_t *pool = tx_desc->pool;

#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == ol_tx_frm_tso) {
		if (cdf_unlikely(tx_desc->tso_desc == NULL))
			cdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	cdf_spin_lock_bh(&pool->flow_pool_lock);
	ol_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			pdev->pause_cb(pool->member_flow_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->flow_pool_size) {
			cdf_spin_unlock_bh(&pool->flow_pool_lock);
			ol_tx_free_invalid_flow_pool(pool);
			cdf_print("%s %d pool is INVALID State!!\n",
				  __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		cdf_print("%s %d pool is INACTIVE State!!\n",
			  __func__, __LINE__);
		break;
	}
	cdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#endif

extern void
dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc);

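/**
 * dump_pkt() - print a tx packet's virtual/physical address and contents
 * @nbuf: network buffer holding the packet
 * @nbuf_paddr: physical address of the packet buffer
 * @len: number of bytes to hex-dump
 *
 * Return: None
 */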
void
dump_pkt(cdf_nbuf_t nbuf, cdf_dma_addr_t nbuf_paddr, int len)
{
	cdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
		  cdf_nbuf_data(nbuf), nbuf_paddr, len);
	print_hex_dump(KERN_DEBUG, "Pkt: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       cdf_nbuf_data(nbuf), len, true);
}

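/* map each HTT packet type to the corresponding CE tx packet type */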
const uint32_t htt_to_ce_pkt_type[] = {
	[htt_pkt_type_raw] = tx_pkt_type_raw,
	[htt_pkt_type_native_wifi] = tx_pkt_type_native_wifi,
	[htt_pkt_type_ethernet] = tx_pkt_type_802_3,
	[htt_pkt_type_mgmt] = tx_pkt_type_mgmt,
	[htt_pkt_type_eth2] = tx_pkt_type_eth2,
	[htt_pkt_num_types] = 0xffffffff
};

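/**
 * ol_tx_desc_ll() - allocate and set up a tx descriptor for a frame
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @netbuf: network buffer carrying the frame to transmit
 * @msdu_info: tx meta-data (HTT action/info fields and TSO info)
 *
 * Allocates a SW tx descriptor, then initializes the associated HTT tx
 * descriptor and fragmentation descriptor for the frame.
 *
 * Return: tx descriptor, or NULL if no descriptor could be allocated
 */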
struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
				   struct ol_txrx_vdev_t *vdev,
				   cdf_nbuf_t netbuf,
				   struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;
	unsigned int i;
	uint32_t num_frags;

	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.action.cksum_offload = cdf_nbuf_get_tx_cksum(netbuf);
	switch (cdf_nbuf_get_exemption_type(netbuf)) {
	case CDF_NBUF_EXEMPT_NO_EXEMPTION:
	case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case CDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		cdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;

	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = ol_tx_frm_tso;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
	} else {
		tx_desc->pkt_type = ol_tx_frm_std;
	}

	/* initialize the HW tx descriptor */

	htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr,
			 ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
			 &msdu_info->tso_info,
			 NULL, vdev->opmode == wlan_op_mode_ocb);

	/*
	 * Initialize the fragmentation descriptor.
	 * Skip the prefix fragment (HTT tx descriptor) that was added
	 * during the call to htt_tx_desc_init above.
	 */
	num_frags = cdf_nbuf_get_num_frags(netbuf);
	/* num_frags is expected to be at most 2 */
	num_frags = (num_frags > NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */

	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			 msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			cdf_size_t frag_len;
			cdf_dma_addr_t frag_paddr;
#ifdef HELIUMPLUS_DEBUG
			void *frag_vaddr;
			frag_vaddr = cdf_nbuf_get_frag_vaddr(netbuf, i);
#endif
			frag_len = cdf_nbuf_get_frag_len(netbuf, i);
			frag_paddr = cdf_nbuf_get_frag_paddr(netbuf, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc, i - 1,
					 frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			cdf_print("%s:%d: htt_fdesc=%p frag=%d frag_vaddr=0x%p frag_paddr=0x%llx len=%zu\n",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i-1, frag_vaddr, frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc, i - 1,
					 frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

#if defined(HELIUMPLUS_DEBUG)
	dump_frag_desc("ol_tx_desc_ll()", tx_desc);
#endif
	return tx_desc;
}

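/**
 * ol_tx_desc_frame_list_free() - free a list of tx descriptors and frames
 * @pdev: pdev handle
 * @tx_descs: list of tx descriptors to free
 * @had_error: non-zero if the frames were not transmitted successfully
 *
 * For each descriptor in the list, unmap its netbuf and return the
 * descriptor to the free list; the netbufs are then freed as a batch.
 *
 * Return: None
 */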
void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
				ol_tx_desc_list *tx_descs, int had_error)
{
	struct ol_tx_desc_t *tx_desc, *tmp;
	cdf_nbuf_t msdus = NULL;

	TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
		cdf_nbuf_t msdu = tx_desc->netbuf;

		cdf_atomic_init(&tx_desc->ref_cnt);	/* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
		/* restore original hdr offset */
		OL_TX_RESTORE_HDR(tx_desc, msdu);
#endif
		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_TO_DEVICE);
		/* free the tx desc */
		ol_tx_desc_free(pdev, tx_desc);
		/* link the netbuf into a list to free as a batch */
		cdf_nbuf_set_next(msdu, msdus);
		msdus = msdu;
	}
	/* free the netbufs as a batch */
	cdf_nbuf_tx_free(msdus, had_error);
}

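/**
 * ol_tx_desc_frame_free_nonstd() - free the tx descriptor and frame for a
 *				    non-standard (e.g. management) tx frame
 * @pdev: pdev handle
 * @tx_desc: tx descriptor for the frame
 * @had_error: non-zero if the frame was not transmitted successfully
 *
 * Invokes the registered tx data callback or OTA ack callback as
 * appropriate, resets the HTT fragments table pointer for management
 * frames, and frees the netbuf and the tx descriptor.
 *
 * Return: None
 */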
void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
				  struct ol_tx_desc_t *tx_desc, int had_error)
{
	int mgmt_type;
	ol_txrx_mgmt_tx_cb ota_ack_cb;
	char *trace_str;

	cdf_atomic_init(&tx_desc->ref_cnt);	/* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* restore original hdr offset */
	OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf));
#endif
	trace_str = (had_error) ? "OT:C:F:" : "OT:C:S:";
	cdf_nbuf_trace_update(tx_desc->netbuf, trace_str);
	if (tx_desc->pkt_type == ol_tx_frm_no_free) {
		/* free the tx desc but don't unmap or free the frame */
		if (pdev->tx_data_callback.func) {
			cdf_nbuf_set_next(tx_desc->netbuf, NULL);
			pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt,
						    tx_desc->netbuf, had_error);
			ol_tx_desc_free(pdev, tx_desc);
			return;
		}
		/* let the code below unmap and free the frame */
	}
	cdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, CDF_DMA_TO_DEVICE);
	/* check the frame type to see what kind of special steps are needed */
	if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
	    (tx_desc->pkt_type != 0xff)) {
		cdf_dma_addr_t frag_desc_paddr = 0;

#if defined(HELIUMPLUS_PADDR64)
		frag_desc_paddr = tx_desc->htt_frag_desc_paddr;
		/* FIX THIS -
		 * The FW currently has trouble using the host's fragments
		 * table for management frames. Until this is fixed,
		 * rather than specifying the fragment table to the FW,
		 * the host SW will specify just the address of the initial
		 * fragment.
		 * Now that the mgmt frame is done, the HTT tx desc's frags
		 * table pointer needs to be reset.
		 */
#if defined(HELIUMPLUS_DEBUG)
		cdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n",
			  __func__, __LINE__, tx_desc->id,
			  frag_desc_paddr);
#endif /* HELIUMPLUS_DEBUG */
#endif /* HELIUMPLUS_PADDR64 */
		htt_tx_desc_frags_table_set(pdev->htt_pdev,
					    tx_desc->htt_tx_desc, 0,
					    frag_desc_paddr, 1);

		mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
		/*
		 * we already checked the value when the mgmt frame was
		 * provided to the txrx layer.
		 * no need to check it a 2nd time.
		 */
		ota_ack_cb = pdev->tx_mgmt.callbacks[mgmt_type].ota_ack_cb;
		if (ota_ack_cb) {
			void *ctxt;
			ctxt = pdev->tx_mgmt.callbacks[mgmt_type].ctxt;
			ota_ack_cb(ctxt, tx_desc->netbuf, had_error);
		}
		/* free the netbuf */
		cdf_nbuf_free(tx_desc->netbuf);
	} else {
		/* single regular frame */
		cdf_nbuf_set_next(tx_desc->netbuf, NULL);
		cdf_nbuf_tx_free(tx_desc->netbuf, had_error);
	}
	/* free the tx desc */
	ol_tx_desc_free(pdev, tx_desc);
}

#if defined(FEATURE_TSO)
/**
 * ol_tso_alloc_segment() - allocate a TSO segment element
 * @pdev: txrx pdev
 *
 * Allocates a TSO segment element from the free list held in
 * the txrx pdev.
 *
 * Return: pointer to the allocated TSO segment element, or NULL if the
 *	   free list is empty
 */
struct cdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
{
	struct cdf_tso_seg_elem_t *tso_seg = NULL;

	cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	if (pdev->tso_seg_pool.freelist) {
		pdev->tso_seg_pool.num_free--;
		tso_seg = pdev->tso_seg_pool.freelist;
		pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
	}
	cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);

	return tso_seg;
}

/**
 * ol_tso_free_segment() - free a TSO segment element
 * @pdev: txrx pdev
 * @tso_seg: the TSO segment element to be freed
 *
 * Returns the TSO segment element to the free list held in the
 * txrx pdev.
 *
 * Return: none
 */
void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
			 struct cdf_tso_seg_elem_t *tso_seg)
{
	cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	tso_seg->next = pdev->tso_seg_pool.freelist;
	pdev->tso_seg_pool.freelist = tso_seg;
	pdev->tso_seg_pool.num_free++;
	cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
}
#endif