/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
#include <ol_tx_desc.h>         /* ol_tx_desc */
#include <ol_tx_send.h>         /* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>        /* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
#include <ol_tx.h>

#ifdef WLAN_FEATURE_FASTPATH
#include <hif.h>                /* HIF_DEVICE */
#include <htc_api.h>            /* Layering violation, but required for fast path */
#include <htt_internal.h>
#include <htt_types.h>          /* htc_endpoint */

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id);
#endif  /* WLAN_FEATURE_FASTPATH */

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool.  Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
	do {								\
		struct ol_txrx_pdev_t *pdev = vdev->pdev;		\
		(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;	\
		tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);	\
		if (qdf_unlikely(!tx_desc)) {				\
			TXRX_STATS_MSDU_LIST_INCR(			\
				pdev, tx.dropped.host_reject, msdu);	\
			return msdu; /* the list of unaccepted MSDUs */	\
		}							\
	} while (0)

#if defined(FEATURE_TSO)
/**
 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
 * related information in the msdu_info meta data
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: 0 - success, >0 - error
 */
static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
	qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
	msdu_info->tso_info.curr_seg = NULL;
	if (qdf_nbuf_is_tso(msdu)) {
		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
		msdu_info->tso_info.tso_seg_list = NULL;
		msdu_info->tso_info.num_segs = num_seg;
		while (num_seg) {
			struct qdf_tso_seg_elem_t *tso_seg =
				ol_tso_alloc_segment(vdev->pdev);
			if (tso_seg) {
				tso_seg->next =
					msdu_info->tso_info.tso_seg_list;
				msdu_info->tso_info.tso_seg_list
					= tso_seg;
				num_seg--;
			} else {
				struct qdf_tso_seg_elem_t *next_seg;
				struct qdf_tso_seg_elem_t *free_seg =
					msdu_info->tso_info.tso_seg_list;
				qdf_print("TSO seg alloc failed!\n");
				while (free_seg) {
					next_seg = free_seg->next;
					ol_tso_free_segment(vdev->pdev,
						free_seg);
					free_seg = next_seg;
				}
				return 1;
			}
		}
		qdf_nbuf_get_tso_info(vdev->pdev->osdev,
			msdu, &(msdu_info->tso_info));
		msdu_info->tso_info.curr_seg =
			msdu_info->tso_info.tso_seg_list;
		num_seg = msdu_info->tso_info.num_segs;
	} else {
		msdu_info->tso_info.is_tso = 0;
		msdu_info->tso_info.num_segs = 1;
	}
	return 0;
}
#endif

/**
 * ol_tx_data() - send data frame
 * @vdev: virtual device handle
 * @skb: skb
 *
 * Return: NULL on success (the skb was accepted), or the skb if it
 *         could not be accepted for transmission
 */
qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
{
	void *qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	qdf_nbuf_t ret;
	QDF_STATUS status;

	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s:pdev is null", __func__);
		return skb;
	}
	if (qdf_unlikely(!qdf_ctx)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			"%s:qdf_ctx is null", __func__);
		return skb;
	}

	status = qdf_nbuf_map_single(qdf_ctx, skb, QDF_DMA_TO_DEVICE);
	if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s: nbuf map failed", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL(vdev, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s: Failed to tx", __func__);
		qdf_nbuf_unmap_single(qdf_ctx, ret, QDF_DMA_TO_DEVICE);
		return ret;
	}

	return NULL;
}

#ifdef IPA_OFFLOAD
/**
 * ol_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: vdev
 * @skb: skb
 *
 * Return: NULL on success, or the skb if it could not be accepted
 */
qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
			qdf_nbuf_t skb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	qdf_nbuf_t ret;

	if (qdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			"%s: pdev is NULL", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
	if (ret) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			"%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
#endif


#if defined(FEATURE_TSO)
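/**
 * ol_tx_ll() - transmit a list of tx frames (LL, TSO-enabled variant)
 * @vdev: virtual device handle
 * @msdu_list: null-terminated list of network buffers to transmit
 *
 * Each jumbo msdu is segmented via ol_tx_prepare_tso(), and one tx
 * descriptor is allocated and sent per TSO segment.
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */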
qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) =
					msdu_info.tso_info.curr_seg->
					seg.tso_frags[0].paddr_low_32;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);

			ol_tx_send(vdev->pdev, tx_desc, msdu);

			if (msdu_info.tso_info.curr_seg) {
				msdu_info.tso_info.curr_seg =
					msdu_info.tso_info.curr_seg->next;
			}

			qdf_nbuf_reset_num_frags(msdu);

			if (msdu_info.tso_info.is_tso) {
				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL;    /* all MSDUs were accepted */
}
#else /* TSO */

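/**
 * ol_tx_ll() - transmit a list of tx frames (LL, non-TSO variant)
 * @vdev: virtual device handle
 * @msdu_list: null-terminated list of network buffers to transmit
 *
 * One tx descriptor is allocated per MSDU and the frame is handed
 * directly to ol_tx_send().
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */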
qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		ol_tx_send(vdev->pdev, tx_desc, msdu);
		msdu = next;
	}
	return NULL;    /* all MSDUs were accepted */
}
#endif /* TSO */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_prepare_ll_fast() - allocate and prepare a Tx descriptor
 *
 * Allocate and prepare a Tx descriptor with msdu and fragment descriptor
 * information.
 *
 * @pdev: pointer to ol pdev handle
 * @vdev: pointer to ol vdev handle
 * @msdu: linked list of msdu packets
 * @pkt_download_len: packet download length
 * @ep_id: endpoint ID
 * @msdu_info: Handle to msdu_info
 *
 * Return: Pointer to Tx descriptor
 */
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
		      ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
		      uint32_t pkt_download_len, uint32_t ep_id,
		      struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	uint32_t *htt_tx_desc;
	void *htc_hdr_vaddr;
	u_int32_t num_frags, i;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (qdf_unlikely(!tx_desc))
		return NULL;

	tx_desc->netbuf = msdu;
	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = ol_tx_frm_tso;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
	} else {
		tx_desc->pkt_type = ol_tx_frm_std;
	}

	htt_tx_desc = tx_desc->htt_tx_desc;

	/* Make sure frags num is set to 0 */
	/*
	 * Do this here rather than in hardstart, so
	 * that we can hopefully take only one cache-miss while
	 * accessing skb->cb.
	 */

	/* HTT Header */
	/* TODO : Take care of multiple fragments */

	/* TODO: Precompute and store paddr in ol_tx_desc_t */
	/* Virtual address of the HTT/HTC header, added by driver */
	htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
	htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
			 &msdu_info->htt, &msdu_info->tso_info,
			 NULL, vdev->opmode == wlan_op_mode_ocb);

	num_frags = qdf_nbuf_get_num_frags(msdu);
	/* num_frags are expected to be 2 max */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			 msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;

			frag_len = qdf_nbuf_get_frag_len(msdu, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
					 i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i - 1, frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
					 i - 1, frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

	/*
	 * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
	 * this is not required. We still have to mark the swap bit correctly,
	 * when posting to the ring
	 */
	/* Check to make sure, data download length is correct */

	/*
	 * TODO : Can we remove this check and always download a fixed length ?
	 */
	if (qdf_unlikely(qdf_nbuf_len(msdu) < pkt_download_len))
		pkt_download_len = qdf_nbuf_len(msdu);

	/* Fill the HTC header information */
	/*
	 * Passing 0 as the seq_no field, we can probably get away
	 * with it for the time being, since this is not checked in f/w
	 */
	/* TODO : Prefill this, look at multi-fragment case */
	HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);

	return tx_desc;
}
#if defined(FEATURE_TSO)
/**
 * ol_tx_ll_fast() - update metadata information and send msdu to HIF/CE
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Return: on success return NULL, pointer to nbuf when it fails to send.
 */
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list
		 * inside the ce_send_fast function, so store the next
		 * pointer before the ce_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
					curr_seg->seg.tso_frags[0].paddr_low_32;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
			msdu_info.htt.info.vdev_id = vdev->vdev_id;
			msdu_info.htt.action.cksum_offload =
				qdf_nbuf_get_tx_cksum(msdu);
			switch (qdf_nbuf_get_exemption_type(msdu)) {
			case QDF_NBUF_EXEMPT_NO_EXEMPTION:
			case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
				/* We want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 1;
				break;
			case QDF_NBUF_EXEMPT_ALWAYS:
				/* We don't want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 0;
				break;
			default:
				msdu_info.htt.action.do_encrypt = 1;
				qdf_assert(0);
				break;
			}

			tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
						pkt_download_len, ep_id,
						&msdu_info);

			if (qdf_likely(tx_desc)) {
				/*
				 * If debug display is enabled, show the meta
				 * data being downloaded to the target via the
				 * HTT tx descriptor.
				 */
				htt_tx_desc_display(tx_desc->htt_tx_desc);
				if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu,
						       1, ep_id))) {
					/*
					 * The packet could not be sent.
					 * Free the descriptor, return the
					 * packet to the caller.
					 */
					ol_tx_desc_free(pdev, tx_desc);
					return msdu;
				}
				if (msdu_info.tso_info.curr_seg) {
					msdu_info.tso_info.curr_seg =
						msdu_info.tso_info.curr_seg->next;
				}

				if (msdu_info.tso_info.is_tso) {
					qdf_nbuf_reset_num_frags(msdu);
					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
				}
			} else {
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				/* the list of unaccepted MSDUs */
				return msdu;
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else
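/**
 * ol_tx_ll_fast() - update metadata information and send msdu to HIF/CE
 * (non-TSO variant)
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Return: on success return NULL, pointer to nbuf when it fails to send.
 */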
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
		msdu_info.htt.info.vdev_id = vdev->vdev_id;
		msdu_info.htt.action.cksum_offload =
			qdf_nbuf_get_tx_cksum(msdu);
		switch (qdf_nbuf_get_exemption_type(msdu)) {
		case QDF_NBUF_EXEMPT_NO_EXEMPTION:
		case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
			/* We want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 1;
			break;
		case QDF_NBUF_EXEMPT_ALWAYS:
			/* We don't want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 0;
			break;
		default:
			msdu_info.htt.action.do_encrypt = 1;
			qdf_assert(0);
			break;
		}

		tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
					  pkt_download_len, ep_id,
					  &msdu_info);

		if (qdf_likely(tx_desc)) {
			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);
			/*
			 * The netbuf may get linked into a different list
			 * inside the ce_send_fast function, so store the next
			 * pointer before the ce_send call.
			 */
			next = qdf_nbuf_next(msdu);
			if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu, 1,
					       ep_id))) {
				/*
				 * The packet could not be sent.
				 * Free the descriptor, return the packet to
				 * the caller.
				 */
				ol_tx_desc_free(pdev, tx_desc);
				return msdu;
			}
			msdu = next;
		} else {
			TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}
	}

	return NULL; /* all MSDUs were accepted */
}
#endif /* FEATURE_TSO */
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_ll_wrapper() - wrapper to ol_tx_ll
 * @vdev: virtual device handle
 * @msdu_list: list of tx frames
 *
 * Dispatches to ol_tx_ll_fast() when HIF fastpath mode is enabled,
 * otherwise to ol_tx_ll().
 */
static inline qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct hif_opaque_softc *hif_device =
		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_likely(hif_device && hif_is_fastpath_mode_enabled(hif_device)))
		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
	else
		msdu_list = ol_tx_ll(vdev, msdu_list);

	return msdu_list;
}
#else
static inline qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	return ol_tx_ll(vdev, msdu_list);
}
#endif  /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL

#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
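/**
 * ol_tx_vdev_ll_pause_queue_send_base() - drain a vdev's pause queue
 * @vdev: vdev whose backlogged tx frames should be sent
 *
 * Sends as much of the vdev's pause-queue backlog as the free tx descriptor
 * count allows, keeping OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN descriptors in
 * reserve, and re-arms the pause timer if a backlog remains.
 */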
static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
{
	int max_to_accept;

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason) {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		return;
	}

	/*
	 * Send as much of the backlog as possible, but leave some margin
	 * of unallocated tx descriptors that can be used for new frames
	 * being transmitted by other vdevs.
	 * Ideally there would be a scheduler, which would not only leave
	 * some margin for new frames for other vdevs, but also would
	 * fairly apportion the tx descriptors between multiple vdevs that
	 * have backlogs in their pause queues.
	 * However, the fairness benefit of having a scheduler for frames
	 * from multiple vdev's pause queues is not sufficient to outweigh
	 * the extra complexity.
	 */
	max_to_accept = vdev->pdev->tx_desc.num_free -
		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
		qdf_nbuf_t tx_msdu;
		max_to_accept--;
		vdev->ll_pause.txq.depth--;
		tx_msdu = vdev->ll_pause.txq.head;
		if (tx_msdu) {
			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
			if (NULL == vdev->ll_pause.txq.head)
				vdev->ll_pause.txq.tail = NULL;
			qdf_nbuf_set_next(tx_msdu, NULL);
			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
						QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
			/*
			 * It is unexpected that ol_tx_ll would reject the frame
			 * since we checked that there's room for it, though
			 * there's an infinitesimal possibility that between the
			 * time we checked the room available and now, a
			 * concurrent batch of tx frames used up all the room.
			 * For simplicity, just drop the frame.
			 */
			if (tx_msdu) {
				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
					       QDF_DMA_TO_DEVICE);
				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
			}
		}
	}
	if (vdev->ll_pause.txq.depth) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
			vdev->ll_pause.q_overflow_cnt++;
	}

	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

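/**
 * ol_tx_vdev_pause_queue_append() - enqueue tx frames on a paused vdev
 * @vdev: vdev whose pause queue the frames are appended to
 * @msdu_list: list of tx frames to enqueue
 * @start_timer: whether to (re)start the pause-queue flush timer
 *
 * Return: NULL if all frames were queued, else the frames that did not
 *         fit within the max queue depth
 */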
static qdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
			      qdf_nbuf_t msdu_list, uint8_t start_timer)
{
	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	while (msdu_list &&
	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
					QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
		DPTRACE(qdf_dp_trace(msdu_list,
			QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
			(uint8_t *)(qdf_nbuf_data(msdu_list)),
			sizeof(qdf_nbuf_data(msdu_list))));

		vdev->ll_pause.txq.depth++;
		if (!vdev->ll_pause.txq.head) {
			vdev->ll_pause.txq.head = msdu_list;
			vdev->ll_pause.txq.tail = msdu_list;
		} else {
			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
		}
		vdev->ll_pause.txq.tail = msdu_list;

		msdu_list = next;
	}
	if (vdev->ll_pause.txq.tail)
		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

	if (start_timer) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
	}
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return msdu_list;
}

/*
 * Store up the tx frame in the vdev's tx queue if the vdev is paused.
 * If there are too many frames in the tx queue, reject it.
 */
qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	uint16_t eth_type;
	uint32_t paused_reason;

	if (msdu_list == NULL)
		return NULL;

	paused_reason = vdev->ll_pause.paused_reason;
	if (paused_reason) {
		if (qdf_unlikely((paused_reason &
			OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
			paused_reason)) {
			eth_type = (((struct ethernet_hdr_t *)
					qdf_nbuf_data(msdu_list))->
					ethertype[0] << 8) |
					(((struct ethernet_hdr_t *)
					qdf_nbuf_data(msdu_list))->ethertype[1]);
			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
				return msdu_list;
			}
		}
		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
	} else {
		if (vdev->ll_pause.txq.depth > 0 ||
		    vdev->pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			/* not paused, but there is a backlog of frames
			   from a prior pause or throttle-off phase */
			msdu_list = ol_tx_vdev_pause_queue_append(
				vdev, msdu_list, 0);
			/* if throttle is disabled or phase is "on",
			   send the frame */
			if (vdev->pdev->tx_throttle.current_throttle_level ==
			    THROTTLE_LEVEL_0 ||
			    vdev->pdev->tx_throttle.current_throttle_phase ==
			    THROTTLE_PHASE_ON) {
				/* send as many frames as possible
				   from the vdev's backlog */
				ol_tx_vdev_ll_pause_queue_send_base(vdev);
			}
		} else {
			/* not paused, no throttle and no backlog -
			   send the new frames */
			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
		}
	}
	return msdu_list;
}

/*
 * Run through the transmit queues for all the vdevs and
 * send the pending frames
 */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
	int max_to_send;        /* remaining tx-frame budget for this call */
	qdf_nbuf_t tx_msdu;
	struct ol_txrx_vdev_t *vdev = NULL;
	uint8_t more;

	if (NULL == pdev)
		return;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;

	/* ensure that we send no more than tx_threshold frames at once */
	max_to_send = pdev->tx_throttle.tx_threshold;

	/* round robin through the vdev queues for the given pdev */

	/* Potential improvement: download several frames from the same vdev
	   at a time, since it is more likely that those frames could be
	   aggregated together, remember which vdev was serviced last,
	   so the next call this function can resume the round-robin
	   traversing where the current invocation left off */
	do {
		more = 0;
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
			if (vdev->ll_pause.txq.depth) {
				if (vdev->ll_pause.paused_reason) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				tx_msdu = vdev->ll_pause.txq.head;
				if (NULL == tx_msdu) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				max_to_send--;
				vdev->ll_pause.txq.depth--;

				vdev->ll_pause.txq.head =
					qdf_nbuf_next(tx_msdu);

				if (NULL == vdev->ll_pause.txq.head)
					vdev->ll_pause.txq.tail = NULL;

				qdf_nbuf_set_next(tx_msdu, NULL);
				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
				/*
				 * It is unexpected that ol_tx_ll would reject
				 * the frame, since we checked that there's
				 * room for it, though there's an infinitesimal
				 * possibility that between the time we checked
				 * the room available and now, a concurrent
				 * batch of tx frames used up all the room.
				 * For simplicity, just drop the frame.
				 */
				if (tx_msdu) {
					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
						       QDF_DMA_TO_DEVICE);
					qdf_nbuf_tx_free(tx_msdu,
							 QDF_NBUF_PKT_ERROR);
				}
			}
			/* check if there are more msdus to transmit */
			if (vdev->ll_pause.txq.depth)
				more = 1;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} while (more && max_to_send);

	vdev = NULL;
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
		if (vdev->ll_pause.txq.depth) {
			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
			qdf_timer_start(
				&pdev->tx_throttle.tx_timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			return;
		}
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
}

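/**
 * ol_tx_vdev_ll_pause_queue_send() - pause-queue timer callback
 * @context: opaque pointer to the ol_txrx_vdev_t being serviced
 *
 * Drains the vdev's pause queue unless tx throttling is currently in its
 * "off" phase.
 */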
void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;
	ol_tx_vdev_ll_pause_queue_send_base(vdev);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

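/**
 * ol_txrx_tx_is_raw() - check if a tx spec requests raw-frame handling
 * @tx_spec: transmission specification flags
 *
 * Return: non-zero if any of the raw / no-aggr / no-encrypt flags are set
 */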
static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
{
	return
		tx_spec &
		(ol_tx_spec_raw | ol_tx_spec_no_aggr | ol_tx_spec_no_encrypt);
}

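/**
 * ol_txrx_tx_raw_subtype() - build the HTT raw-frame subtype bitmap
 * @tx_spec: transmission specification flags
 *
 * Return: subtype value to program into the HTT tx descriptor for a
 *         raw frame
 */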
static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
{
	uint8_t sub_type = 0x1; /* 802.11 MAC header present */

	if (tx_spec & ol_tx_spec_no_aggr)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
	if (tx_spec & ol_tx_spec_no_encrypt)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	if (tx_spec & ol_tx_spec_nwifi_no_encrypt)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	return sub_type;
}

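/**
 * ol_tx_non_std_ll() - send a list of non-standard tx frames (LL path)
 * @vdev: virtual device transmitting the frames
 * @tx_spec: what non-standard handling to apply (no-free, TSO, raw, ...)
 * @msdu_list: null-terminated list of tx frames
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */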
qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
		 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);

		if (tx_spec != ol_tx_spec_std) {
			if (tx_spec & ol_tx_spec_no_free) {
				tx_desc->pkt_type = ol_tx_frm_no_free;
			} else if (tx_spec & ol_tx_spec_tso) {
				tx_desc->pkt_type = ol_tx_frm_tso;
			} else if (tx_spec & ol_tx_spec_nwifi_no_encrypt) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu);
		msdu = next;
	}
	return NULL;            /* all MSDUs were accepted */
}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
	do { \
		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
			if (tx_msdu_info.peer) { \
				/* remove the peer reference added above */ \
				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
			} \
			goto MSDU_LOOP_BOTTOM; \
		} \
	} while (0)
#else
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
#endif

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */

/**
 * parse_ocb_tx_header() - Function to check for OCB
 * TX control header on a packet and extract it if present
 *
 * @msdu: Pointer to OS packet (qdf_nbuf_t)
 * @tx_ctrl: where to copy the extracted TX control header (may be NULL)
 *
 * Return: true if no control header is present or it was parsed
 *         successfully, false if the control header version is invalid
 */
#define OCB_HEADER_VERSION     1
bool parse_ocb_tx_header(qdf_nbuf_t msdu,
			struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	struct ether_header *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if TX control header is present */
	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do.. */
		return true;

	/* Remove the ethernet header */
	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		return false;
	}

	/* Remove the TX control header */
	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}

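/**
 * ol_tx_non_std() - send a list of non-standard tx frames
 * @vdev: virtual device transmitting the frames
 * @tx_spec: what non-standard handling to apply
 * @msdu_list: null-terminated list of tx frames
 *
 * Thin wrapper that dispatches to the LL implementation.
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */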
qdf_nbuf_t
ol_tx_non_std(ol_txrx_vdev_handle vdev,
	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
}

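/**
 * ol_txrx_data_tx_cb_set() - store the data tx callback in the pdev
 * @vdev: vdev used to reach the pdev that stores the callback
 * @callback: data tx callback (ol_txrx_data_tx_cb) to register
 * @ctxt: context to pass to the callback
 */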
void
ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
		       ol_txrx_data_tx_cb callback, void *ctxt)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	pdev->tx_data_callback.func = callback;
	pdev->tx_data_callback.ctxt = ctxt;
}

/**
 * ol_txrx_mgmt_tx_cb_set() - Store a callback for delivery
 * notifications for management frames.
 *
 * @pdev - the data physical device object
 * @type - the type of mgmt frame the callback is used for
 * @download_cb - the callback for notification of delivery to the target
 * @ota_ack_cb - the callback for notification of delivery to the peer
 * @ctxt - context to use with the callback
 *
 * When the txrx SW receives notifications from the target that a tx frame
 * has been delivered to its recipient, it will check if the tx frame
 * is a management frame.  If so, the txrx SW will check the management
 * frame type specified when the frame was submitted for transmission.
 * If there is a callback function registered for the type of management
 * frame in question, the txrx code will invoke the callback to inform
 * the management + control SW that the mgmt frame was delivered.
 * This function is used by the control SW to store a callback pointer
 * for a given type of management frame.
 */
void
ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
		       uint8_t type,
		       ol_txrx_mgmt_tx_cb download_cb,
		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
	pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
	pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
}

#if defined(HELIUMPLUS_PADDR64)
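/**
 * dump_frag_desc() - debug dump of a tx descriptor's fragment descriptor
 * @msg: caller-supplied label (not printed by the current implementation)
 * @tx_desc: tx descriptor whose HTT fragment descriptor is dumped
 *
 * Prints the descriptor addresses and hex-dumps the TSO flag words plus
 * up to six fragment pointers.
 */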
void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
{
	uint32_t *frag_ptr_i_p;
	int i;

	qdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
		  tx_desc, tx_desc->id);
	qdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%llx",
		  tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
	qdf_print("%s %d: Fragment Descriptor 0x%p (paddr=0x%llx)",
		  __func__, __LINE__, tx_desc->htt_frag_desc,
		  tx_desc->htt_frag_desc_paddr);

	/* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
	   is already de-referrable (=> in virtual address space) */
	frag_ptr_i_p = tx_desc->htt_frag_desc;

	/* Dump 6 words of TSO flags */
	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags:  ",
		       DUMP_PREFIX_NONE, 8, 4,
		       frag_ptr_i_p, 24, true);

	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */

	i = 0;
	while (*frag_ptr_i_p) {
		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr:  ",
			       DUMP_PREFIX_NONE, 8, 4,
			       frag_ptr_i_p, 8, true);
		i++;
		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
			break;
		else  /* jump to next pointer - skip length */
			frag_ptr_i_p += 2;
	}
	return;
}
#endif /* HELIUMPLUS_PADDR64 */

/**
 * ol_txrx_mgmt_send_ext() - Transmit a management frame
 *
 * @vdev - virtual device transmitting the frame
 * @tx_mgmt_frm - management frame to transmit
 * @type - the type of management frame (determines what callback to use)
 * @use_6mbps - specify whether management frame to transmit should
 * use 6 Mbps rather than 1 Mbps min rate(for 5GHz band or P2P)
 * @chanfreq - channel to transmit the frame on
 *
 * Send the specified management frame from the specified virtual device.
 * The type is used for determining whether to invoke a callback to inform
 * the sender that the tx mgmt frame was delivered, and if so, which
 * callback to use.
 *
 * Return: 0 - the frame is accepted for transmission
 *         -EINVAL - the frame was not accepted
 */
int
ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
		  qdf_nbuf_t tx_mgmt_frm,
		  uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_msdu_info_t tx_msdu_info;

	tx_msdu_info.tso_info.is_tso = 0;

	tx_msdu_info.htt.action.use_6mbps = use_6mbps;
	tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
	tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
	tx_msdu_info.htt.action.do_tx_complete =
		pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;

	/*
	 * FIX THIS: l2_hdr_type should only specify L2 header type
	 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
	 * that is a combination of L2 header type and 802.11 frame type.
	 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
	 * But if the 802.11 frame type is "data", then the HTT pkt type is
	 * the L2 header type (more or less): 802.3 vs. Native WiFi
	 * (basic 802.11).
	 * (Or the header type can be "raw", which is any version of the 802.11
	 * header, and also implies that some of the offloaded tx data
	 * processing steps may not apply.)
	 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
	 * l2_hdr_type field to program the HTT pkt type.  Thus, this txrx SW
	 * needs to overload the l2_hdr_type to indicate whether the frame is
	 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
	 * To fix this, the msdu_info's l2_hdr_type should be left specifying
	 * just the L2 header type.  For mgmt frames, there should be a
	 * separate function to patch the HTT pkt type to store a "mgmt" value
	 * rather than the L2 header type.  Then the HTT pkt type can be
	 * programmed efficiently for data frames, and the msdu_info's
	 * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
	 * frame type rather than the L2 header type.
	 */
	/*
	 * FIX THIS: remove duplication of htt_frm_type_mgmt and
	 * htt_pkt_type_mgmt
	 * The htt module expects a "enum htt_pkt_type" value.
	 * The htt_dxe module expects a "enum htt_frm_type" value.
	 * This needs to be cleaned up, so both versions of htt use a
	 * consistent method of specifying the frame type.
	 */
#ifdef QCA_SUPPORT_INTEGRATED_SOC
	/* tx mgmt frames always come with a 802.11 header */
	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
	tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
#else
	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
	tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
#endif

	tx_msdu_info.peer = NULL;

	qdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm, QDF_DMA_TO_DEVICE);
	/* For LL tx_comp_req is not used so initialized to 0 */
	tx_msdu_info.htt.action.tx_comp_req = 0;
	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
	/* FIX THIS -
	 * The FW currently has trouble using the host's fragments table
	 * for management frames.  Until this is fixed, rather than
	 * specifying the fragment table to the FW, specify just the
	 * address of the initial fragment.
	 */
#if defined(HELIUMPLUS_PADDR64)
	/* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
	   tx_desc); */
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (tx_desc) {
		/*
		 * Following the call to ol_tx_desc_ll, frag 0 is the
		 * HTT tx HW descriptor, and the frame payload is in
		 * frag 1.
		 */
		htt_tx_desc_frags_table_set(
			pdev->htt_pdev,
			tx_desc->htt_tx_desc,
			qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
			0, 0);
#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
		dump_frag_desc(
			"after htt_tx_desc_frags_table_set",
			tx_desc);
#endif /* defined(HELIUMPLUS_PADDR64) */
	}
	if (!tx_desc) {
		qdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
				      QDF_DMA_TO_DEVICE);
		return -EINVAL; /* can't accept the tx mgmt frame */
	}
	TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;

	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
		QDF_NBUF_TX_PKT_MGMT_TRACK;
	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
			  htt_pkt_type_mgmt);

	return 0;               /* accepted the tx mgmt frame */
}

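/**
 * ol_txrx_sync() - send an HTT host-to-target sync message
 * @pdev: physical device handle
 * @sync_cnt: sync count value to pass in the sync message
 */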
void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
{
	htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
}

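/**
 * ol_tx_reinject() - re-send a tx frame to a specific peer
 * @vdev: vdev on which to retransmit the frame
 * @msdu: frame to reinject
 * @peer_id: peer ID to address the frame to
 *
 * Marks the HTT tx descriptor as postponed and sets the destination
 * peer ID before sending the frame.
 *
 * Return: NULL if the frame was accepted, else the unaccepted frame
 */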
qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
			  qdf_nbuf_t msdu, uint16_t peer_id)
{
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
	msdu_info.peer = NULL;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;

	ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);

	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);

	ol_tx_send(vdev->pdev, tx_desc, msdu);

	return NULL;
}

#if defined(FEATURE_TSO)
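/**
 * ol_tso_seg_list_init() - allocate the pdev's pool of TSO segment elements
 * @pdev: physical device that owns the pool
 * @num_seg: number of qdf_tso_seg_elem_t entries to pre-allocate
 *
 * Builds the tso_seg_pool free list and creates the spinlock that
 * protects it.
 */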
void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;

	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
	pdev->tso_seg_pool.freelist = c_element;
	for (i = 0; i < (num_seg - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		c_element = c_element->next;
		c_element->next = NULL;
	}
	pdev->tso_seg_pool.pool_size = num_seg;
	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
}

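/**
 * ol_tso_seg_list_deinit() - free the pdev's pool of TSO segment elements
 * @pdev: physical device that owns the pool
 *
 * Frees every element on the tso_seg_pool free list, resets the pool
 * accounting, and destroys the pool spinlock.
 */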
void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	c_element = pdev->tso_seg_pool.freelist;
	for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	pdev->tso_seg_pool.freelist = NULL;
	pdev->tso_seg_pool.num_free = 0;
	pdev->tso_seg_pool.pool_size = 0;
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
	qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
}
#endif /* FEATURE_TSO */