/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_read, etc. */
#include <qdf_util.h>		/* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>		/* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>	/* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT1 */
#include <ol_tx_desc.h>		/* ol_tx_desc */
#include <ol_tx_send.h>		/* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>	/* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>	/* OL_TX_ENCAP, etc */
#include <ol_tx.h>

#ifdef WLAN_FEATURE_FASTPATH
#include <hif.h>		/* HIF_DEVICE */
#include <htc_api.h>		/* Layering violation, but required for fast path */
#include <htt_internal.h>
#include <htt_types.h>		/* htc_endpoint */
#include <cdp_txrx_peer_ops.h>

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id);
#endif /* WLAN_FEATURE_FASTPATH */

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool. Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info)		\
	do {								\
		struct ol_txrx_pdev_t *pdev = vdev->pdev;		\
		(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;	\
		tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);	\
		if (qdf_unlikely(!tx_desc)) {				\
			TXRX_STATS_MSDU_LIST_INCR(			\
				pdev, tx.dropped.host_reject, msdu);	\
			return msdu; /* the list of unaccepted MSDUs */	\
		}							\
	} while (0)

#if defined(FEATURE_TSO)
/**
 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
 * related information in the msdu_info meta data
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: 0 - success, >0 - error
 */
static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
	qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
	msdu_info->tso_info.curr_seg = NULL;
	if (qdf_nbuf_is_tso(msdu)) {
		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
		msdu_info->tso_info.tso_seg_list = NULL;
		msdu_info->tso_info.num_segs = num_seg;
		while (num_seg) {
			struct qdf_tso_seg_elem_t *tso_seg =
				ol_tso_alloc_segment(vdev->pdev);
			if (tso_seg) {
				tso_seg->next =
					msdu_info->tso_info.tso_seg_list;
				msdu_info->tso_info.tso_seg_list
					= tso_seg;
				num_seg--;
			} else {
				struct qdf_tso_seg_elem_t *next_seg;
				struct qdf_tso_seg_elem_t *free_seg =
					msdu_info->tso_info.tso_seg_list;
				qdf_print("TSO seg alloc failed!\n");
				while (free_seg) {
					next_seg = free_seg->next;
					ol_tso_free_segment(vdev->pdev,
							    free_seg);
					free_seg = next_seg;
				}
				return 1;
			}
		}
		qdf_nbuf_get_tso_info(vdev->pdev->osdev,
			msdu, &(msdu_info->tso_info));
		msdu_info->tso_info.curr_seg =
			msdu_info->tso_info.tso_seg_list;
		num_seg = msdu_info->tso_info.num_segs;
	} else {
		msdu_info->tso_info.is_tso = 0;
		msdu_info->tso_info.num_segs = 1;
	}
	return 0;
}
#endif

/**
 * ol_tx_data() - send data frame
 * @vdev: virtual device handle
 * @skb: skb
 *
 * Return: skb/NULL for success
 */
qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
{
	void *qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
	struct ol_txrx_pdev_t *pdev;
	qdf_nbuf_t ret;
	QDF_STATUS status;

	if (qdf_unlikely(!vdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s:vdev is null", __func__);
		return skb;
	} else {
		pdev = vdev->pdev;
	}

	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s:pdev is null", __func__);
		return skb;
	}
	if (qdf_unlikely(!qdf_ctx)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s:qdf_ctx is null", __func__);
		return skb;
	}

	status = qdf_nbuf_map_single(qdf_ctx, skb, QDF_DMA_TO_DEVICE);
	if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s: nbuf map failed", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL(vdev, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s: Failed to tx", __func__);
		qdf_nbuf_unmap_single(qdf_ctx, ret, QDF_DMA_TO_DEVICE);
		return ret;
	}

	return NULL;
}

#ifdef IPA_OFFLOAD
/**
 * ol_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: vdev
 * @skb: skb
 *
 * Return: skb/NULL for success
 */
qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
				     qdf_nbuf_t skb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	qdf_nbuf_t ret;

	if (qdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
	if (ret) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
#endif


#if defined(FEATURE_TSO)
qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) =
					msdu_info.tso_info.curr_seg->
					seg.tso_frags[0].paddr_low_32;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the
			 * msdu. This will ensure that the skb is freed only
			 * after receiving tx completion for all segments of
			 * an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

			/*
			 * If debug display is enabled, show the meta-data
			 * being downloaded to the target via the HTT tx
			 * descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);

			ol_tx_send(vdev->pdev, tx_desc, msdu);

			if (msdu_info.tso_info.curr_seg) {
				msdu_info.tso_info.curr_seg =
					msdu_info.tso_info.curr_seg->next;
			}

			qdf_nbuf_reset_num_frags(msdu);

			if (msdu_info.tso_info.is_tso) {
				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else /* TSO */

qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		ol_tx_send(vdev->pdev, tx_desc, msdu);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}
#endif /* TSO */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_prepare_ll_fast() - Allocate and prepare a Tx descriptor
 *
 * Allocate and prepare a Tx descriptor with msdu and fragment descriptor
 * information.
 *
 * @pdev: pointer to ol pdev handle
 * @vdev: pointer to ol vdev handle
 * @msdu: linked list of msdu packets
 * @pkt_download_len: packet download length
 * @ep_id: endpoint ID
 * @msdu_info: Handle to msdu_info
 *
 * Return: Pointer to Tx descriptor
 */
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
		      ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
		      uint32_t pkt_download_len, uint32_t ep_id,
		      struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	uint32_t *htt_tx_desc;
	void *htc_hdr_vaddr;
	u_int32_t num_frags, i;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (qdf_unlikely(!tx_desc))
		return NULL;

	tx_desc->netbuf = msdu;
	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = OL_TX_FRM_TSO;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
	} else {
		tx_desc->pkt_type = OL_TX_FRM_STD;
	}

	htt_tx_desc = tx_desc->htt_tx_desc;

	/* Make sure frags num is set to 0 */
	/*
	 * Do this here rather than in hardstart, so
	 * that we can hopefully take only one cache-miss while
	 * accessing skb->cb.
	 */

	/* HTT Header */
	/* TODO : Take care of multiple fragments */

	/* TODO: Precompute and store paddr in ol_tx_desc_t */
	/* Virtual address of the HTT/HTC header, added by driver */
	htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
	htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
			 &msdu_info->htt, &msdu_info->tso_info,
			 NULL, vdev->opmode == wlan_op_mode_ocb);

	num_frags = qdf_nbuf_get_num_frags(msdu);
	/* num_frags are expected to be 2 max */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;

			frag_len = qdf_nbuf_get_frag_len(msdu, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
					 i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i - 1, frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
					 i - 1, frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

	/*
	 * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
	 * this is not required. We still have to mark the swap bit correctly,
	 * when posting to the ring
	 */
	/* Check to make sure, data download length is correct */

	/*
	 * TODO : Can we remove this check and always download a fixed length ?
	 */
	if (qdf_unlikely(qdf_nbuf_len(msdu) < pkt_download_len))
		pkt_download_len = qdf_nbuf_len(msdu);

	/* Fill the HTC header information */
	/*
	 * Passing 0 as the seq_no field, we can probably get away
	 * with it for the time being, since this is not checked in f/w
	 */
	/* TODO : Prefill this, look at multi-fragment case */
	HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);

	return tx_desc;
}
#if defined(FEATURE_TSO)
/**
 * ol_tx_ll_fast() - Update metadata information and send msdu to HIF/CE
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Return: on success return NULL, pointer to nbuf when it fails to send.
 */
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list
		 * inside the ce_send_fast function, so store the next
		 * pointer before the ce_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
					curr_seg->seg.tso_frags[0].paddr_low_32;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the
			 * msdu. This will ensure that the skb is freed only
			 * after receiving tx completion for all segments of
			 * an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
			msdu_info.htt.info.vdev_id = vdev->vdev_id;
			msdu_info.htt.action.cksum_offload =
				qdf_nbuf_get_tx_cksum(msdu);
			switch (qdf_nbuf_get_exemption_type(msdu)) {
			case QDF_NBUF_EXEMPT_NO_EXEMPTION:
			case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
				/* We want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 1;
				break;
			case QDF_NBUF_EXEMPT_ALWAYS:
				/* We don't want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 0;
				break;
			default:
				msdu_info.htt.action.do_encrypt = 1;
				qdf_assert(0);
				break;
			}

			tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
							pkt_download_len, ep_id,
							&msdu_info);

			if (qdf_likely(tx_desc)) {
				/*
				 * If debug display is enabled, show the meta
				 * data being downloaded to the target via the
				 * HTT tx descriptor.
				 */
				htt_tx_desc_display(tx_desc->htt_tx_desc);
				if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu,
						       1, ep_id))) {
					/*
					 * The packet could not be sent.
					 * Free the descriptor, return the
					 * packet to the caller.
					 */
					ol_tx_desc_free(pdev, tx_desc);
					return msdu;
				}
				if (msdu_info.tso_info.curr_seg) {
					msdu_info.tso_info.curr_seg =
						msdu_info.tso_info.curr_seg->next;
				}

				if (msdu_info.tso_info.is_tso) {
					qdf_nbuf_reset_num_frags(msdu);
					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
				}
			} else {
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				/* the list of unaccepted MSDUs */
				return msdu;
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
		msdu_info.htt.info.vdev_id = vdev->vdev_id;
		msdu_info.htt.action.cksum_offload =
			qdf_nbuf_get_tx_cksum(msdu);
		switch (qdf_nbuf_get_exemption_type(msdu)) {
		case QDF_NBUF_EXEMPT_NO_EXEMPTION:
		case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
			/* We want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 1;
			break;
		case QDF_NBUF_EXEMPT_ALWAYS:
			/* We don't want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 0;
			break;
		default:
			msdu_info.htt.action.do_encrypt = 1;
			qdf_assert(0);
			break;
		}

		tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
						pkt_download_len, ep_id,
						&msdu_info);

		if (qdf_likely(tx_desc)) {
			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);
			/*
			 * The netbuf may get linked into a different list
			 * inside the ce_send_fast function, so store the next
			 * pointer before the ce_send call.
			 */
			next = qdf_nbuf_next(msdu);
			if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu, 1,
					       ep_id))) {
				/* The packet could not be sent */
				/* Free the descriptor, return the packet to
				 * the caller */
				ol_tx_desc_free(pdev, tx_desc);
				return msdu;
			}
			msdu = next;
		} else {
			TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}
	}

	return NULL; /* all MSDUs were accepted */
}
#endif /* FEATURE_TSO */
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_ll_wrapper() - wrapper to ol_tx_ll
 *
 */
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct hif_opaque_softc *hif_device =
		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_likely(hif_device && hif_is_fastpath_mode_enabled(hif_device)))
		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
	else
		msdu_list = ol_tx_ll(vdev, msdu_list);

	return msdu_list;
}
#else
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	return ol_tx_ll(vdev, msdu_list);
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL

#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
{
	int max_to_accept;

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason) {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		return;
	}

	/*
	 * Send as much of the backlog as possible, but leave some margin
	 * of unallocated tx descriptors that can be used for new frames
	 * being transmitted by other vdevs.
	 * Ideally there would be a scheduler, which would not only leave
	 * some margin for new frames for other vdevs, but also would
	 * fairly apportion the tx descriptors between multiple vdevs that
	 * have backlogs in their pause queues.
	 * However, the fairness benefit of having a scheduler for frames
	 * from multiple vdev's pause queues is not sufficient to outweigh
	 * the extra complexity.
	 */
	max_to_accept = vdev->pdev->tx_desc.num_free -
		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
		qdf_nbuf_t tx_msdu;
		max_to_accept--;
		vdev->ll_pause.txq.depth--;
		tx_msdu = vdev->ll_pause.txq.head;
		if (tx_msdu) {
			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
			if (NULL == vdev->ll_pause.txq.head)
				vdev->ll_pause.txq.tail = NULL;
			qdf_nbuf_set_next(tx_msdu, NULL);
			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
						QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
			/*
			 * It is unexpected that ol_tx_ll would reject the frame
			 * since we checked that there's room for it, though
			 * there's an infinitesimal possibility that between the
			 * time we checked the room available and now, a
			 * concurrent batch of tx frames used up all the room.
			 * For simplicity, just drop the frame.
			 */
			if (tx_msdu) {
				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
					       QDF_DMA_TO_DEVICE);
				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
			}
		}
	}
	if (vdev->ll_pause.txq.depth) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
			vdev->ll_pause.q_overflow_cnt++;
	}

	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

static qdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
			      qdf_nbuf_t msdu_list, uint8_t start_timer)
{
	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	while (msdu_list &&
	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
					     QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
		DPTRACE(qdf_dp_trace(msdu_list,
			QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
			(uint8_t *)(qdf_nbuf_data(msdu_list)),
			sizeof(qdf_nbuf_data(msdu_list))));

		vdev->ll_pause.txq.depth++;
		if (!vdev->ll_pause.txq.head) {
			vdev->ll_pause.txq.head = msdu_list;
			vdev->ll_pause.txq.tail = msdu_list;
		} else {
			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
		}
		vdev->ll_pause.txq.tail = msdu_list;

		msdu_list = next;
	}
	if (vdev->ll_pause.txq.tail)
		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

	if (start_timer) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
	}
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return msdu_list;
}

/*
 * Store up the tx frame in the vdev's tx queue if the vdev is paused.
 * If there are too many frames in the tx queue, reject it.
 */
qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	uint16_t eth_type;
	uint32_t paused_reason;

	if (msdu_list == NULL)
		return NULL;

	paused_reason = vdev->ll_pause.paused_reason;
	if (paused_reason) {
		if (qdf_unlikely((paused_reason &
				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
				 paused_reason)) {
			eth_type = (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->
				    ethertype[0] << 8) |
				   (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->ethertype[1]);
			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
				return msdu_list;
			}
		}
		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
	} else {
		if (vdev->ll_pause.txq.depth > 0 ||
		    vdev->pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			/* not paused, but there is a backlog of frames
			   from a prior pause or throttle off phase */
			msdu_list = ol_tx_vdev_pause_queue_append(
				vdev, msdu_list, 0);
			/* if throttle is disabled or phase is "on",
			   send the frame */
			if (vdev->pdev->tx_throttle.current_throttle_level ==
			    THROTTLE_LEVEL_0 ||
			    vdev->pdev->tx_throttle.current_throttle_phase ==
			    THROTTLE_PHASE_ON) {
				/* send as many frames as possible
				   from the vdev's backlog */
				ol_tx_vdev_ll_pause_queue_send_base(vdev);
			}
		} else {
			/* not paused, no throttle and no backlog -
			   send the new frames */
			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
		}
	}
	return msdu_list;
}

/*
 * Run through the transmit queues for all the vdevs and
 * send the pending frames
 */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
	int max_to_send;	/* tracks how many frames have been sent */
	qdf_nbuf_t tx_msdu;
	struct ol_txrx_vdev_t *vdev = NULL;
	uint8_t more;

	if (NULL == pdev)
		return;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;

	/* ensure that we send no more than tx_threshold frames at once */
	max_to_send = pdev->tx_throttle.tx_threshold;

	/* round robin through the vdev queues for the given pdev */

	/* Potential improvement: download several frames from the same vdev
	   at a time, since it is more likely that those frames could be
	   aggregated together, remember which vdev was serviced last,
	   so the next call this function can resume the round-robin
	   traversing where the current invocation left off */
	do {
		more = 0;
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
			if (vdev->ll_pause.txq.depth) {
				if (vdev->ll_pause.paused_reason) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				tx_msdu = vdev->ll_pause.txq.head;
				if (NULL == tx_msdu) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				max_to_send--;
				vdev->ll_pause.txq.depth--;

				vdev->ll_pause.txq.head =
					qdf_nbuf_next(tx_msdu);

				if (NULL == vdev->ll_pause.txq.head)
					vdev->ll_pause.txq.tail = NULL;

				qdf_nbuf_set_next(tx_msdu, NULL);
				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
				/*
				 * It is unexpected that ol_tx_ll would reject
				 * the frame, since we checked that there's
				 * room for it, though there's an infinitesimal
				 * possibility that between the time we checked
				 * the room available and now, a concurrent
				 * batch of tx frames used up all the room.
				 * For simplicity, just drop the frame.
				 */
				if (tx_msdu) {
					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
						       QDF_DMA_TO_DEVICE);
					qdf_nbuf_tx_free(tx_msdu,
							 QDF_NBUF_PKT_ERROR);
				}
			}
			/* check if there are more msdus to transmit */
			if (vdev->ll_pause.txq.depth)
				more = 1;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} while (more && max_to_send);

	vdev = NULL;
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
		if (vdev->ll_pause.txq.depth) {
			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
			qdf_timer_start(
				&pdev->tx_throttle.tx_timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			return;
		}
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
}

void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;
	ol_tx_vdev_ll_pause_queue_send_base(vdev);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
{
	return
		tx_spec &
		(OL_TX_SPEC_RAW | OL_TX_SPEC_NO_AGGR | OL_TX_SPEC_NO_ENCRYPT);
}

static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
{
	uint8_t sub_type = 0x1; /* 802.11 MAC header present */

	if (tx_spec & OL_TX_SPEC_NO_AGGR)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
	if (tx_spec & OL_TX_SPEC_NO_ENCRYPT)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	return sub_type;
}

qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
		 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);

		if (tx_spec != OL_TX_SPEC_STD) {
			if (tx_spec & OL_TX_SPEC_NO_FREE) {
				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
			} else if (tx_spec & OL_TX_SPEC_TSO) {
				tx_desc->pkt_type = OL_TX_FRM_TSO;
			} else if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info)	\
	do {								\
		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);	\
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
			if (tx_msdu_info.peer) {			\
				/* remove the peer reference added above */ \
				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
			}						\
			goto MSDU_LOOP_BOTTOM;				\
		}							\
	} while (0)
#else
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
#endif

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */

/**
 * parse_ocb_tx_header() - Function to check for OCB
 * TX control header on a packet and extract it if present
 *
 * @msdu: Pointer to OS packet (qdf_nbuf_t)
 *
 * Return: true if no OCB TX control header is present or a valid header was
 *         parsed and removed, false if the header is invalid
 */
#define OCB_HEADER_VERSION     1
bool parse_ocb_tx_header(qdf_nbuf_t msdu,
			 struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	struct ether_header *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if TX control header is present */
	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do. */
		return true;

	/* Remove the ethernet header */
	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		return false;
	}

	/* Remove the TX control header */
	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}

/**
 * ol_tx_non_std - Allow the control-path SW to send data frames
 *
 * @vdev - which vdev should transmit the tx data frames
 * @tx_spec - what non-standard handling to apply to the tx data frames
 * @msdu_list - NULL-terminated list of tx MSDUs
 *
 * Generally, all tx data frames come from the OS shim into the txrx layer.
 * However, there are rare cases such as TDLS messaging where the UMAC
 * control-path SW creates tx data frames.
 * This UMAC SW can call this function to provide the tx data frames to
 * the txrx layer.
 * The UMAC SW can request a callback for these data frames after their
 * transmission completes, by using the ol_txrx_data_tx_cb_set function
 * to register a tx completion callback, and by specifying
 * ol_tx_spec_no_free as the tx_spec arg when giving the frames to
 * ol_tx_non_std.
 * The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
 * as specified by ol_cfg_frame_type().
 *
 * Return: null - success, skb - failure
 */
qdf_nbuf_t
ol_tx_non_std(ol_txrx_vdev_handle vdev,
	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
}

void
ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
		       ol_txrx_data_tx_cb callback, void *ctxt)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	pdev->tx_data_callback.func = callback;
	pdev->tx_data_callback.ctxt = ctxt;
}

/**
 * ol_txrx_mgmt_tx_cb_set() - Store a callback for delivery
 * notifications for management frames.
 *
 * @pdev - the data physical device object
 * @type - the type of mgmt frame the callback is used for
 * @download_cb - the callback for notification of delivery to the target
 * @ota_ack_cb - the callback for notification of delivery to the peer
 * @ctxt - context to use with the callback
 *
 * When the txrx SW receives notifications from the target that a tx frame
 * has been delivered to its recipient, it will check if the tx frame
 * is a management frame. If so, the txrx SW will check the management
 * frame type specified when the frame was submitted for transmission.
 * If there is a callback function registered for the type of management
 * frame in question, the txrx code will invoke the callback to inform
 * the management + control SW that the mgmt frame was delivered.
 * This function is used by the control SW to store a callback pointer
 * for a given type of management frame.
 */
void
ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
		       uint8_t type,
		       ol_txrx_mgmt_tx_cb download_cb,
		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
	pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
	pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
}

#if defined(HELIUMPLUS_PADDR64)
void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
{
	uint32_t *frag_ptr_i_p;
	int i;

	qdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
		  tx_desc, tx_desc->id);
	qdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%llx",
		  tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
	qdf_print("%s %d: Fragment Descriptor 0x%p (paddr=0x%llx)",
		  __func__, __LINE__, tx_desc->htt_frag_desc,
		  tx_desc->htt_frag_desc_paddr);

	/* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
	   is already de-referrable (=> in virtual address space) */
	frag_ptr_i_p = tx_desc->htt_frag_desc;

	/* Dump 6 words of TSO flags */
	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags: ",
		       DUMP_PREFIX_NONE, 8, 4,
		       frag_ptr_i_p, 24, true);

	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */

	i = 0;
	while (*frag_ptr_i_p) {
		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr: ",
			       DUMP_PREFIX_NONE, 8, 4,
			       frag_ptr_i_p, 8, true);
		i++;
		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
			break;
		else /* jump to next pointer - skip length */
			frag_ptr_i_p += 2;
	}
	return;
}
#endif /* HELIUMPLUS_PADDR64 */

Dhanashri Atre12a08392016-02-17 13:10:34 -08001256/**
1257 * ol_txrx_mgmt_send_ext() - Transmit a management frame
1258 *
1259 * @vdev - virtual device transmitting the frame
1260 * @tx_mgmt_frm - management frame to transmit
1261 * @type - the type of managment frame (determines what callback to use)
1262 * @use_6mbps - specify whether management frame to transmit should
1263 * use 6 Mbps rather than 1 Mbps min rate(for 5GHz band or P2P)
1264 * @chanfreq - channel to transmit the frame on
1265 *
1266 * Send the specified management frame from the specified virtual device.
1267 * The type is used for determining whether to invoke a callback to inform
1268 * the sender that the tx mgmt frame was delivered, and if so, which
1269 * callback to use.
1270 *
1271 * Return: 0 - the frame is accepted for transmission
1272 * 1 - the frame was not accepted
1273 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001274int
Dhanashri Atre12a08392016-02-17 13:10:34 -08001275ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301276 qdf_nbuf_t tx_mgmt_frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001277 uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
1278{
1279 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1280 struct ol_tx_desc_t *tx_desc;
1281 struct ol_txrx_msdu_info_t tx_msdu_info;
1282
1283 tx_msdu_info.tso_info.is_tso = 0;
1284
1285 tx_msdu_info.htt.action.use_6mbps = use_6mbps;
1286 tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
1287 tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
1288 tx_msdu_info.htt.action.do_tx_complete =
1289 pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;
1290
1291 /*
1292 * FIX THIS: l2_hdr_type should only specify L2 header type
1293 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
1294 * that is a combination of L2 header type and 802.11 frame type.
1295 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
1296 * But if the 802.11 frame type is "data", then the HTT pkt type is
1297 * the L2 header type (more or less): 802.3 vs. Native WiFi
1298 * (basic 802.11).
1299 * (Or the header type can be "raw", which is any version of the 802.11
1300 * header, and also implies that some of the offloaded tx data
1301 * processing steps may not apply.)
1302 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
1303 * l2_hdr_type field to program the HTT pkt type. Thus, this txrx SW
1304 * needs to overload the l2_hdr_type to indicate whether the frame is
1305 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
1306 * To fix this, the msdu_info's l2_hdr_type should be left specifying
1307 * just the L2 header type. For mgmt frames, there should be a
1308 * separate function to patch the HTT pkt type to store a "mgmt" value
1309 * rather than the L2 header type. Then the HTT pkt type can be
1310 * programmed efficiently for data frames, and the msdu_info's
1311 * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
1312 * frame type rather than the L2 header type.
1313 */
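	/*
	 * Illustrative sketch of the fix described above (the helper name is
	 * hypothetical, not part of the current HTT API): l2_hdr_type would
	 * keep describing only the L2 header,
	 *
	 *	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
	 *
	 * and, once ol_tx_desc_ll() has produced the HTT descriptor, a
	 * dedicated call would patch just the HTT pkt type field:
	 *
	 *	htt_tx_desc_patch_mgmt_type(pdev->htt_pdev, tx_desc->htt_tx_desc);
	 */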
1314 /*
1315 * FIX THIS: remove duplication of htt_frm_type_mgmt and
1316 * htt_pkt_type_mgmt
1317 * The htt module expects an "enum htt_pkt_type" value.
1318 * The htt_dxe module expects an "enum htt_frm_type" value.
1319 * This needs to be cleaned up, so both versions of htt use a
1320 * consistent method of specifying the frame type.
1321 */
1322#ifdef QCA_SUPPORT_INTEGRATED_SOC
1323	/* tx mgmt frames always come with an 802.11 header */
1324 tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
1325 tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
1326#else
1327 tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
1328 tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
1329#endif
1330
1331 tx_msdu_info.peer = NULL;
1332
Nirav Shahcbc6d722016-03-01 16:24:53 +05301333 qdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm, QDF_DMA_TO_DEVICE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001334	/* For LL, tx_comp_req is not used, so initialize it to 0 */
1335 tx_msdu_info.htt.action.tx_comp_req = 0;
1336 tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
1337 /* FIX THIS -
1338 * The FW currently has trouble using the host's fragments table
1339 * for management frames. Until this is fixed, rather than
1340 * specifying the fragment table to the FW, specify just the
1341 * address of the initial fragment.
1342 */
1343#if defined(HELIUMPLUS_PADDR64)
1344	/* dump_frag_desc("ol_txrx_mgmt_send_ext(): after ol_tx_desc_ll",
1345 tx_desc); */
1346#endif /* defined(HELIUMPLUS_PADDR64) */
1347 if (tx_desc) {
1348 /*
1349 * Following the call to ol_tx_desc_ll, frag 0 is the
1350 * HTT tx HW descriptor, and the frame payload is in
1351 * frag 1.
1352 */
1353 htt_tx_desc_frags_table_set(
1354 pdev->htt_pdev,
1355 tx_desc->htt_tx_desc,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301356 qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001357 0, 0);
1358#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
1359 dump_frag_desc(
1360 "after htt_tx_desc_frags_table_set",
1361 tx_desc);
1362#endif /* defined(HELIUMPLUS_PADDR64) */
1363 }
1364 if (!tx_desc) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05301365 qdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301366 QDF_DMA_TO_DEVICE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001367 return -EINVAL; /* can't accept the tx mgmt frame */
1368 }
1369 TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
1370 TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
1371 tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
1372
1373 htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301374 QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
1375 QDF_NBUF_TX_PKT_MGMT_TRACK;
1376 ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001377 htt_pkt_type_mgmt);
1378
1379 return 0; /* accepted the tx mgmt frame */
1380}
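
#ifdef OL_TX_EXAMPLE_SKETCHES	/* hypothetical guard: illustration only */
/*
 * Sketch of a caller (an assumption, not an API defined by this file):
 * transmit one management frame on the given vdev. The type index 0 is
 * hypothetical; a real caller passes whichever index it registered
 * callbacks for in pdev->tx_mgmt.callbacks[].
 */
static int ol_tx_example_send_one_mgmt(ol_txrx_vdev_handle vdev,
				       qdf_nbuf_t mgmt_frm,
				       uint16_t chanfreq)
{
	/* use_6mbps = 1: prefer the 6 Mbps basic rate (5 GHz band / P2P) */
	return ol_txrx_mgmt_send_ext(vdev, mgmt_frm, 0, 1, chanfreq);
}
#endif /* OL_TX_EXAMPLE_SKETCHES */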
1381
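/**
 * ol_txrx_sync() - send an HTT sync message to the target
 * @pdev - physical device handle
 * @sync_cnt - sync count value to include in the message
 *
 * Thin wrapper that forwards @sync_cnt to the target via htt_h2t_sync_msg().
 */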
1382void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
1383{
1384 htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
1385}
1386
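/**
 * ol_tx_reinject() - re-send an MSDU through the LL transmit path
 * @vdev - virtual device that the frame belongs to
 * @msdu - frame to (re)transmit
 * @peer_id - ID of the destination peer
 *
 * Allocate a fresh LL tx descriptor for @msdu, mark the HTT descriptor as
 * postponed, bind it to @peer_id and hand the frame to ol_tx_send().
 *
 * Return: NULL - the frame is consumed by the transmit path
 */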
Nirav Shahcbc6d722016-03-01 16:24:53 +05301387qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
1388 qdf_nbuf_t msdu, uint16_t peer_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001389{
1390 struct ol_tx_desc_t *tx_desc;
1391 struct ol_txrx_msdu_info_t msdu_info;
1392
1393 msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
1394 msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
1395 msdu_info.peer = NULL;
1396 msdu_info.htt.action.tx_comp_req = 0;
1397 msdu_info.tso_info.is_tso = 0;
1398
1399 ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
1400 HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
1401
1402 htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);
1403
1404 ol_tx_send(vdev->pdev, tx_desc, msdu);
1405
1406 return NULL;
1407}
1408
1409#if defined(FEATURE_TSO)
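/**
 * ol_tso_seg_list_init() - allocate the pdev's pool of TSO segment elements
 * @pdev - physical device to attach the pool to
 * @num_seg - number of qdf_tso_seg_elem_t elements to pre-allocate
 *
 * Build a singly linked freelist of @num_seg elements and create the mutex
 * that protects it.
 */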
1410void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
1411{
1412 int i;
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301413 struct qdf_tso_seg_elem_t *c_element;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001414
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
	pdev->tso_seg_pool.freelist = c_element;
	for (i = 0; i < (num_seg - 1); i++) {
		if (qdf_unlikely(!c_element)) {
			qdf_print("%s: TSO seg element alloc failed\n",
				  __func__);
			break;
		}
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		c_element = c_element->next;
	}
	/* NULL-terminate the freelist, even if an allocation failed early */
	if (c_element)
		c_element->next = NULL;
Leo Chang376398b2015-10-23 14:19:02 -07001423 pdev->tso_seg_pool.pool_size = num_seg;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301424 qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001425}
1426
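/**
 * ol_tso_seg_list_deinit() - free the pdev's pool of TSO segment elements
 * @pdev - physical device whose pool is being torn down
 *
 * Walk the freelist, free every element, reset the pool bookkeeping and
 * destroy the mutex that protected it.
 */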
1427void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
1428{
Leo Chang376398b2015-10-23 14:19:02 -07001429 int i;
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301430 struct qdf_tso_seg_elem_t *c_element;
1431 struct qdf_tso_seg_elem_t *temp;
Leo Chang376398b2015-10-23 14:19:02 -07001432
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301433 qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
Leo Chang376398b2015-10-23 14:19:02 -07001434 c_element = pdev->tso_seg_pool.freelist;
	for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
		/* stop early if the freelist was never fully populated */
		if (!c_element)
			break;
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
1442
1443 pdev->tso_seg_pool.freelist = NULL;
1444 pdev->tso_seg_pool.num_free = 0;
1445 pdev->tso_seg_pool.pool_size = 0;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301446 qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1447 qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001448}
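
#ifdef OL_TX_EXAMPLE_SKETCHES	/* hypothetical guard: illustration only */
/*
 * Sketch (an assumption, not the driver's actual helper): how a TSO tx path
 * might pop one pre-allocated segment element from the pool initialised by
 * ol_tso_seg_list_init() above.
 */
static struct qdf_tso_seg_elem_t *
ol_tso_example_pop_segment(struct ol_txrx_pdev_t *pdev)
{
	struct qdf_tso_seg_elem_t *elem;

	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	elem = pdev->tso_seg_pool.freelist;
	if (elem)
		pdev->tso_seg_pool.freelist = elem->next;
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);

	return elem;
}
#endif /* OL_TX_EXAMPLE_SKETCHES */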
1449#endif /* FEATURE_TSO */