/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_read, etc. */
#include <qdf_util.h>		/* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>		/* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>	/* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT1 */
#include <ol_tx_desc.h>		/* ol_tx_desc */
#include <ol_tx_send.h>		/* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_classify.h>	/* ol_tx_classify, ol_tx_classify_mgmt */
#include <ol_tx_queue.h>	/* ol_tx_enqueue */
#include <ol_tx_sched.h>	/* ol_tx_sched */


/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>	/* OL_TX_ENCAP, etc */
#include <ol_tx.h>

#ifdef WLAN_FEATURE_FASTPATH
#include <hif.h>		/* HIF_DEVICE */
#include <htc_api.h>	/* Layering violation, but required for fast path */
#include <htt_internal.h>
#include <htt_types.h>		/* htc_endpoint */
#include <cdp_txrx_peer_ops.h>

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len);
#endif  /* WLAN_FEATURE_FASTPATH */

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool. Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
	do {								\
		struct ol_txrx_pdev_t *pdev = vdev->pdev;		\
		(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;	\
		tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);	\
		if (qdf_unlikely(!tx_desc)) {				\
			TXRX_STATS_MSDU_LIST_INCR(			\
				pdev, tx.dropped.host_reject, msdu);	\
			return msdu; /* the list of unaccepted MSDUs */	\
		}							\
	} while (0)
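
/*
 * Note: ol_tx_prepare_ll() is a statement macro containing a "return msdu;",
 * so a failed descriptor allocation returns the remaining (unaccepted) MSDU
 * list directly from the ol_tx_ll*() function that expands the macro.
 */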

#if defined(FEATURE_TSO)
/**
 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
 * related information in the msdu_info meta data
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: 0 - success, >0 - error
 */
static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
	qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
	msdu_info->tso_info.curr_seg = NULL;
	if (qdf_nbuf_is_tso(msdu)) {
		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
		msdu_info->tso_info.tso_seg_list = NULL;
		msdu_info->tso_info.num_segs = num_seg;
		while (num_seg) {
			struct qdf_tso_seg_elem_t *tso_seg =
				ol_tso_alloc_segment(vdev->pdev);
			if (tso_seg) {
				tso_seg->next =
					msdu_info->tso_info.tso_seg_list;
				msdu_info->tso_info.tso_seg_list
					= tso_seg;
				num_seg--;
			} else {
				struct qdf_tso_seg_elem_t *next_seg;
				struct qdf_tso_seg_elem_t *free_seg =
					msdu_info->tso_info.tso_seg_list;
				qdf_print("TSO seg alloc failed!\n");
				while (free_seg) {
					next_seg = free_seg->next;
					ol_tso_free_segment(vdev->pdev,
						 free_seg);
					free_seg = next_seg;
				}
				return 1;
			}
		}
		qdf_nbuf_get_tso_info(vdev->pdev->osdev,
			msdu, &(msdu_info->tso_info));
		msdu_info->tso_info.curr_seg =
			msdu_info->tso_info.tso_seg_list;
		num_seg = msdu_info->tso_info.num_segs;
	} else {
		msdu_info->tso_info.is_tso = 0;
		msdu_info->tso_info.num_segs = 1;
	}
	return 0;
}
#endif
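
/*
 * On a TSO segment-allocation failure, ol_tx_prepare_tso() hands any
 * segments it already took back to the pdev's segment pool via
 * ol_tso_free_segment() and returns nonzero; the callers below then count
 * the remaining MSDUs as host-rejected and return the list to their caller
 * instead of transmitting it partially.
 */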

/**
 * ol_tx_data() - send data frame
 * @vdev: virtual device handle
 * @skb: skb
 *
 * Return: skb/NULL for success
 */
qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
{
	struct ol_txrx_pdev_t *pdev;
	qdf_nbuf_t ret;

	if (qdf_unlikely(!vdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s:vdev is null", __func__);
		return skb;
	} else {
		pdev = vdev->pdev;
	}

	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s:pdev is null", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_SEND(vdev, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}

#ifdef IPA_OFFLOAD
/**
 * ol_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: vdev
 * @skb: skb
 *
 * Return: skb/NULL for success
 */
qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
			qdf_nbuf_t skb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	qdf_nbuf_t ret;

	if (qdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			"%s: pdev is NULL", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_SEND((struct ol_txrx_vdev_t *)vdev, skb);
	if (ret) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			"%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
#endif


#if defined(FEATURE_TSO)
qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				 tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) =
					msdu_info.tso_info.curr_seg->
					seg.tso_frags[0].paddr;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);

			ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);

			if (msdu_info.tso_info.curr_seg) {
				msdu_info.tso_info.curr_seg =
					msdu_info.tso_info.curr_seg->next;
			}

			qdf_nbuf_reset_num_frags(msdu);

			if (msdu_info.tso_info.is_tso) {
				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL;            /* all MSDUs were accepted */
}
#else /* TSO */

qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
		msdu = next;
	}
	return NULL;            /* all MSDUs were accepted */
}
#endif /* TSO */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_prepare_ll_fast() - Alloc and prepare a Tx descriptor
 *
 * Allocate and prepare a Tx descriptor with msdu and fragment descriptor
 * information.
 *
 * @pdev: pointer to ol pdev handle
 * @vdev: pointer to ol vdev handle
 * @msdu: linked list of msdu packets
 * @pkt_download_len: packet download length
 * @ep_id: endpoint ID
 * @msdu_info: Handle to msdu_info
 *
 * Return: Pointer to Tx descriptor
 */
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
		      ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
		      uint32_t pkt_download_len, uint32_t ep_id,
		      struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	uint32_t *htt_tx_desc;
	void *htc_hdr_vaddr;
	u_int32_t num_frags, i;
	enum extension_header_type type;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (qdf_unlikely(!tx_desc))
		return NULL;

	tx_desc->netbuf = msdu;
	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = OL_TX_FRM_TSO;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
	} else {
		tx_desc->pkt_type = OL_TX_FRM_STD;
	}

	htt_tx_desc = tx_desc->htt_tx_desc;

	/* Make sure frags num is set to 0 */
	/*
	 * Do this here rather than in hardstart, so
	 * that we can hopefully take only one cache-miss while
	 * accessing skb->cb.
	 */

	/* HTT Header */
	/* TODO : Take care of multiple fragments */

	type = ol_tx_get_ext_header_type(vdev, msdu);

	/* TODO: Precompute and store paddr in ol_tx_desc_t */
	/* Virtual address of the HTT/HTC header, added by driver */
	htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
	htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
			 &msdu_info->htt, &msdu_info->tso_info,
			 NULL, type);

	num_frags = qdf_nbuf_get_num_frags(msdu);
	/* num_frags are expected to be 2 max */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUSPADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			 msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;

			frag_len = qdf_nbuf_get_frag_len(msdu, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
			if (type != EXT_HEADER_NOT_PRESENT) {
				frag_paddr +=
				    sizeof(struct htt_tx_msdu_desc_ext_t);
				frag_len -=
				    sizeof(struct htt_tx_msdu_desc_ext_t);
			}
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
					 i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i - 1, frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUSPADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
					 i - 1, frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

	/*
	 * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
	 * this is not required. We still have to mark the swap bit correctly,
	 * when posting to the ring
	 */
	/* Check to make sure, data download length is correct */

	/*
	 * TODO : Can we remove this check and always download a fixed length ?
	 */


	if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
		pkt_download_len += sizeof(struct htt_tx_msdu_desc_ext_t);

	if (qdf_unlikely(qdf_nbuf_len(msdu) < pkt_download_len))
		pkt_download_len = qdf_nbuf_len(msdu);

	/* Fill the HTC header information */
	/*
	 * Passing 0 as the seq_no field, we can probably get away
	 * with it for the time being, since this is not checked in f/w
	 */
	/* TODO : Prefill this, look at multi-fragment case */
	HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);

	return tx_desc;
}
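
/*
 * Note: the HTC header filled by HTC_TX_DESC_FILL() occupies the
 * HTC_HEADER_LEN bytes immediately preceding the HTT tx descriptor
 * (htc_hdr_vaddr above), and pkt_download_len is clamped to the actual
 * netbuf length so the target is never asked to download more bytes than
 * the frame holds.
 */
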
#if defined(FEATURE_TSO)
/**
 * ol_tx_ll_fast() - Update metadata information and send msdu to HIF/CE
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Return: on success return NULL, pointer to nbuf when it fails to send.
 */
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				 tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list
		 * inside the ce_send_fast function, so store the next
		 * pointer before the ce_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
					curr_seg->seg.tso_frags[0].paddr;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
			msdu_info.htt.info.vdev_id = vdev->vdev_id;
			msdu_info.htt.action.cksum_offload =
				qdf_nbuf_get_tx_cksum(msdu);
			switch (qdf_nbuf_get_exemption_type(msdu)) {
			case QDF_NBUF_EXEMPT_NO_EXEMPTION:
			case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
				/* We want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 1;
				break;
			case QDF_NBUF_EXEMPT_ALWAYS:
				/* We don't want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 0;
				break;
			default:
				msdu_info.htt.action.do_encrypt = 1;
				qdf_assert(0);
				break;
			}

			tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
						  pkt_download_len, ep_id,
						  &msdu_info);

			if (qdf_likely(tx_desc)) {
				DPTRACE(qdf_dp_trace_ptr(msdu,
				    QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
				    qdf_nbuf_data_addr(msdu),
				    sizeof(qdf_nbuf_data(msdu)),
				    tx_desc->id, vdev->vdev_id));
				/*
				 * If debug display is enabled, show the meta
				 * data being downloaded to the target via the
				 * HTT tx descriptor.
				 */
				if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER
							 (msdu))
					pkt_download_len +=
					  sizeof(struct htt_tx_msdu_desc_ext_t);

				htt_tx_desc_display(tx_desc->htt_tx_desc);
				if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
						ep_id, pkt_download_len))) {
					/*
					 * The packet could not be sent.
					 * Free the descriptor, return the
					 * packet to the caller.
					 */
					ol_tx_desc_free(pdev, tx_desc);
					return msdu;
				}
				if (msdu_info.tso_info.curr_seg) {
					msdu_info.tso_info.curr_seg =
						msdu_info.tso_info.curr_seg->next;
				}

				if (msdu_info.tso_info.is_tso) {
					qdf_nbuf_reset_num_frags(msdu);
					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
				}
			} else {
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				/* the list of unaccepted MSDUs */
				return msdu;
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
		msdu_info.htt.info.vdev_id = vdev->vdev_id;
		msdu_info.htt.action.cksum_offload =
			qdf_nbuf_get_tx_cksum(msdu);
		switch (qdf_nbuf_get_exemption_type(msdu)) {
		case QDF_NBUF_EXEMPT_NO_EXEMPTION:
		case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
			/* We want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 1;
			break;
		case QDF_NBUF_EXEMPT_ALWAYS:
			/* We don't want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 0;
			break;
		default:
			msdu_info.htt.action.do_encrypt = 1;
			qdf_assert(0);
			break;
		}

		tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
					  pkt_download_len, ep_id,
					  &msdu_info);

		if (qdf_likely(tx_desc)) {
			DPTRACE(qdf_dp_trace_ptr(msdu,
				QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
				qdf_nbuf_data_addr(msdu),
				sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
				vdev->vdev_id));
			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
				pkt_download_len +=
				   sizeof(struct htt_tx_msdu_desc_ext_t);

			htt_tx_desc_display(tx_desc->htt_tx_desc);
			/*
			 * The netbuf may get linked into a different list
			 * inside the ce_send_fast function, so store the next
			 * pointer before the ce_send call.
			 */
			next = qdf_nbuf_next(msdu);
			if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
					       ep_id, pkt_download_len))) {
				/* The packet could not be sent */
				/* Free the descriptor, return the packet to the
				 * caller */
				ol_tx_desc_free(pdev, tx_desc);
				return msdu;
			}
			msdu = next;
		} else {
			TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}
	}

	return NULL; /* all MSDUs were accepted */
}
#endif /* FEATURE_TSO */
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_ll_wrapper() - wrapper to ol_tx_ll
 *
 */
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct hif_opaque_softc *hif_device =
		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_likely(hif_device && hif_is_fastpath_mode_enabled(hif_device)))
		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
	else
		msdu_list = ol_tx_ll(vdev, msdu_list);

	return msdu_list;
}
#else
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	return ol_tx_ll(vdev, msdu_list);
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL

#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
{
	int max_to_accept;

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason) {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		return;
	}

	/*
	 * Send as much of the backlog as possible, but leave some margin
	 * of unallocated tx descriptors that can be used for new frames
	 * being transmitted by other vdevs.
	 * Ideally there would be a scheduler, which would not only leave
	 * some margin for new frames for other vdevs, but also would
	 * fairly apportion the tx descriptors between multiple vdevs that
	 * have backlogs in their pause queues.
	 * However, the fairness benefit of having a scheduler for frames
	 * from multiple vdev's pause queues is not sufficient to outweigh
	 * the extra complexity.
	 */
	max_to_accept = vdev->pdev->tx_desc.num_free -
		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
		qdf_nbuf_t tx_msdu;
		max_to_accept--;
		vdev->ll_pause.txq.depth--;
		tx_msdu = vdev->ll_pause.txq.head;
		if (tx_msdu) {
			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
			if (NULL == vdev->ll_pause.txq.head)
				vdev->ll_pause.txq.tail = NULL;
			qdf_nbuf_set_next(tx_msdu, NULL);
			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
						QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
			/*
			 * It is unexpected that ol_tx_ll would reject the frame
			 * since we checked that there's room for it, though
			 * there's an infinitesimal possibility that between the
			 * time we checked the room available and now, a
			 * concurrent batch of tx frames used up all the room.
			 * For simplicity, just drop the frame.
			 */
			if (tx_msdu) {
				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
					       QDF_DMA_TO_DEVICE);
				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
			}
		}
	}
	if (vdev->ll_pause.txq.depth) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
			vdev->ll_pause.q_overflow_cnt++;
	}

	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

static qdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
			      qdf_nbuf_t msdu_list, uint8_t start_timer)
{
	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	while (msdu_list &&
	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
					     QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
		DPTRACE(qdf_dp_trace(msdu_list,
				QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
				qdf_nbuf_data_addr(msdu_list),
				sizeof(qdf_nbuf_data(msdu_list)), QDF_TX));

		vdev->ll_pause.txq.depth++;
		if (!vdev->ll_pause.txq.head) {
			vdev->ll_pause.txq.head = msdu_list;
			vdev->ll_pause.txq.tail = msdu_list;
		} else {
			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
		}
		vdev->ll_pause.txq.tail = msdu_list;

		msdu_list = next;
	}
	if (vdev->ll_pause.txq.tail)
		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

	if (start_timer) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
	}
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return msdu_list;
}
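
/*
 * Note: ol_tx_vdev_pause_queue_append() returns whatever portion of
 * msdu_list did not fit in the vdev's pause queue (NULL when every frame
 * was queued), so callers can propagate the overflow back up the tx path.
 */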

/*
 * Store up the tx frame in the vdev's tx queue if the vdev is paused.
 * If there are too many frames in the tx queue, reject it.
 */
qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	uint16_t eth_type;
	uint32_t paused_reason;

	if (msdu_list == NULL)
		return NULL;

	paused_reason = vdev->ll_pause.paused_reason;
	if (paused_reason) {
		if (qdf_unlikely((paused_reason &
				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
				 paused_reason)) {
			eth_type = (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->
				    ethertype[0] << 8) |
				   (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->ethertype[1]);
			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
				return msdu_list;
			}
		}
		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
	} else {
		if (vdev->ll_pause.txq.depth > 0 ||
		    vdev->pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			/* not paused, but there is a backlog of frames
			   from a prior pause or throttle off phase */
			msdu_list = ol_tx_vdev_pause_queue_append(
				vdev, msdu_list, 0);
			/* if throttle is disabled or phase is "on",
			   send the frame */
			if (vdev->pdev->tx_throttle.current_throttle_level ==
			    THROTTLE_LEVEL_0 ||
			    vdev->pdev->tx_throttle.current_throttle_phase ==
			    THROTTLE_PHASE_ON) {
				/* send as many frames as possible
				   from the vdev's backlog */
				ol_tx_vdev_ll_pause_queue_send_base(vdev);
			}
		} else {
			/* not paused, no throttle and no backlog -
			   send the new frames */
			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
		}
	}
	return msdu_list;
}

/*
 * Run through the transmit queues for all the vdevs and
 * send the pending frames
 */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
	int max_to_send;        /* tracks how many frames can still be sent */
	qdf_nbuf_t tx_msdu;
	struct ol_txrx_vdev_t *vdev = NULL;
	uint8_t more;

	if (NULL == pdev)
		return;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;

	/* ensure that we send no more than tx_threshold frames at once */
	max_to_send = pdev->tx_throttle.tx_threshold;

	/* round robin through the vdev queues for the given pdev */

	/* Potential improvement: download several frames from the same vdev
	   at a time, since it is more likely that those frames could be
	   aggregated together; remember which vdev was serviced last,
	   so the next call to this function can resume the round-robin
	   traversing where the current invocation left off */
	do {
		more = 0;
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
			if (vdev->ll_pause.txq.depth) {
				if (vdev->ll_pause.paused_reason) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				tx_msdu = vdev->ll_pause.txq.head;
				if (NULL == tx_msdu) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				max_to_send--;
				vdev->ll_pause.txq.depth--;

				vdev->ll_pause.txq.head =
					qdf_nbuf_next(tx_msdu);

				if (NULL == vdev->ll_pause.txq.head)
					vdev->ll_pause.txq.tail = NULL;

				qdf_nbuf_set_next(tx_msdu, NULL);
				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
				/*
				 * It is unexpected that ol_tx_ll would reject
				 * the frame, since we checked that there's
				 * room for it, though there's an infinitesimal
				 * possibility that between the time we checked
				 * the room available and now, a concurrent
				 * batch of tx frames used up all the room.
				 * For simplicity, just drop the frame.
				 */
				if (tx_msdu) {
					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
						       QDF_DMA_TO_DEVICE);
					qdf_nbuf_tx_free(tx_msdu,
							 QDF_NBUF_PKT_ERROR);
				}
			}
			/* check if there are more msdus to transmit */
			if (vdev->ll_pause.txq.depth)
				more = 1;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} while (more && max_to_send);

	vdev = NULL;
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
		if (vdev->ll_pause.txq.depth) {
			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
			qdf_timer_start(
				&pdev->tx_throttle.tx_timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			return;
		}
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
}

void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;
	ol_tx_vdev_ll_pause_queue_send_base(vdev);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
{
	return
		tx_spec &
		(OL_TX_SPEC_RAW | OL_TX_SPEC_NO_AGGR | OL_TX_SPEC_NO_ENCRYPT);
}

static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
{
	uint8_t sub_type = 0x1; /* 802.11 MAC header present */

	if (tx_spec & OL_TX_SPEC_NO_AGGR)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
	if (tx_spec & OL_TX_SPEC_NO_ENCRYPT)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	return sub_type;
}
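
/*
 * Example (illustrative): for tx_spec = OL_TX_SPEC_RAW | OL_TX_SPEC_NO_ENCRYPT,
 * ol_txrx_tx_raw_subtype() returns 0x1 (802.11 MAC header present) with the
 * HTT "no encrypt" subtype bit also set; the "no aggregation" bit stays clear
 * because OL_TX_SPEC_NO_AGGR was not requested.
 */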

qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
		 enum ol_tx_spec tx_spec,
		 qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);

		if (tx_spec != OL_TX_SPEC_STD) {
			if (tx_spec & OL_TX_SPEC_NO_FREE) {
				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
			} else if (tx_spec & OL_TX_SPEC_TSO) {
				tx_desc->pkt_type = OL_TX_FRM_TSO;
			} else if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
		msdu = next;
	}
	return NULL;            /* all MSDUs were accepted */
}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
	do { \
		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
			if (tx_msdu_info.peer) { \
				/* remove the peer reference added above */ \
				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
			} \
			goto MSDU_LOOP_BOTTOM; \
		} \
	} while (0)
#else
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
#endif

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */

/**
 * parse_ocb_tx_header() - Check for and parse an OCB TX control header
 * @msdu: Pointer to OS packet (qdf_nbuf_t)
 * @tx_ctrl: Pointer where the TX control header is copied, if it is present
 *
 * Return: true if ocb parsing is successful
 */
#define OCB_HEADER_VERSION     1
bool parse_ocb_tx_header(qdf_nbuf_t msdu,
			 struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	struct ether_header *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if TX control header is present */
	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do.. */
		return true;

	/* Remove the ethernet header */
	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		return false;
	}

	/* Remove the TX control header */
	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}
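
/*
 * Illustrative layout of a frame handled above (based on the parsing logic
 * in parse_ocb_tx_header(); field sizes depend on struct ocb_tx_ctrl_hdr_t):
 *
 *   [ether_header with ether_type == ETHERTYPE_OCB_TX]
 *   [ocb_tx_ctrl_hdr_t: version, length, per-packet TX control fields]
 *   [frame to transmit]
 *
 * Both leading headers are stripped with qdf_nbuf_pull_head() before the
 * frame continues down the tx path.
 */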
1169
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301170
1171#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_TX_DESC_HI_PRIO_RESERVE)
1172
1173/**
1174 * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
1175 * for a HL system.
1176 * @pdev: the data physical device sending the data
1177 * @vdev: the virtual device sending the data
1178 * @msdu: the tx frame
1179 * @msdu_info: the tx meta data
1180 *
1181 * Return: the tx decriptor
1182 */
1183static inline
1184struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
1185 struct ol_txrx_vdev_t *vdev,
1186 qdf_nbuf_t msdu,
1187 struct ol_txrx_msdu_info_t *msdu_info)
1188{
1189 struct ol_tx_desc_t *tx_desc = NULL;
1190
1191 if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
1192 TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
1193 tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
1194 } else if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
1195 if ((qdf_nbuf_is_ipv4_dhcp_pkt(msdu) == true) ||
1196 (qdf_nbuf_is_ipv4_eapol_pkt(msdu) == true)) {
1197 tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
1198 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
1199 "Provided tx descriptor from reserve pool for DHCP/EAPOL\n");
1200 }
1201 }
1202 return tx_desc;
1203}
1204#else
1205
1206static inline
1207struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
1208 struct ol_txrx_vdev_t *vdev,
1209 qdf_nbuf_t msdu,
1210 struct ol_txrx_msdu_info_t *msdu_info)
1211{
1212 struct ol_tx_desc_t *tx_desc = NULL;
1213 tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
1214 return tx_desc;
1215}
1216#endif
1217
1218#if defined(CONFIG_HL_SUPPORT)
1219
1220/**
1221 * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
1222 * for management frame
1223 * @pdev: the data physical device sending the data
1224 * @vdev: the virtual device sending the data
1225 * @tx_mgmt_frm: the tx managment frame
1226 * @tx_msdu_info: the tx meta data
1227 *
1228 * Return: the tx decriptor
1229 */
1230static inline
1231struct ol_tx_desc_t *
1232ol_txrx_mgmt_tx_desc_alloc(
1233 struct ol_txrx_pdev_t *pdev,
1234 struct ol_txrx_vdev_t *vdev,
1235 qdf_nbuf_t tx_mgmt_frm,
1236 struct ol_txrx_msdu_info_t *tx_msdu_info)
1237{
1238 struct ol_tx_desc_t *tx_desc;
1239 tx_msdu_info->htt.action.tx_comp_req = 1;
1240 tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
1241 return tx_desc;
1242}
1243
1244/**
1245 * ol_txrx_mgmt_send_frame() - send a management frame
1246 * @vdev: virtual device sending the frame
1247 * @tx_desc: tx desc
1248 * @tx_mgmt_frm: management frame to send
1249 * @tx_msdu_info: the tx meta data
1250 * @chanfreq: download change frequency
1251 *
1252 * Return:
1253 * 0 -> the frame is accepted for transmission, -OR-
1254 * 1 -> the frame was not accepted
1255 */
1256static inline
1257int ol_txrx_mgmt_send_frame(
1258 struct ol_txrx_vdev_t *vdev,
1259 struct ol_tx_desc_t *tx_desc,
1260 qdf_nbuf_t tx_mgmt_frm,
1261 struct ol_txrx_msdu_info_t *tx_msdu_info,
1262 uint16_t chanfreq)
1263{
1264 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1265 struct ol_tx_frms_queue_t *txq;
1266 /*
1267 * 1. Look up the peer and queue the frame in the peer's mgmt queue.
1268 * 2. Invoke the download scheduler.
1269 */
1270 txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
1271 if (!txq) {
1272 /*TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
1273 msdu);*/
1274 qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
1275 ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
1276 1 /* error */);
1277 if (tx_msdu_info->peer) {
1278 /* remove the peer reference added above */
1279 ol_txrx_peer_unref_delete(tx_msdu_info->peer);
1280 }
1281 return 1; /* can't accept the tx mgmt frame */
1282 }
1283 /* Initialize the HTT tx desc l2 header offset field.
1284 * Even though tx encap does not apply to mgmt frames,
1285 * htt_tx_desc_mpdu_header still needs to be called,
1286 * to specifiy that there was no L2 header added by tx encap,
1287 * so the frame's length does not need to be adjusted to account for
1288 * an added L2 header.
1289 */
1290 htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
1291 htt_tx_desc_init(
1292 pdev->htt_pdev, tx_desc->htt_tx_desc,
1293 tx_desc->htt_tx_desc_paddr,
1294 ol_tx_desc_id(pdev, tx_desc),
1295 tx_mgmt_frm,
1296 &tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0);
1297 htt_tx_desc_display(tx_desc->htt_tx_desc);
1298 htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
1299
1300 ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
1301 if (tx_msdu_info->peer) {
1302 /* remove the peer reference added above */
1303 ol_txrx_peer_unref_delete(tx_msdu_info->peer);
1304 }
1305 ol_tx_sched(vdev->pdev);
1306
1307 return 0;
1308}
1309
1310#else
1311
1312static inline
1313struct ol_tx_desc_t *
1314ol_txrx_mgmt_tx_desc_alloc(
1315 struct ol_txrx_pdev_t *pdev,
1316 struct ol_txrx_vdev_t *vdev,
1317 qdf_nbuf_t tx_mgmt_frm,
1318 struct ol_txrx_msdu_info_t *tx_msdu_info)
1319{
1320 struct ol_tx_desc_t *tx_desc;
1321 /* For LL tx_comp_req is not used so initialized to 0 */
1322 tx_msdu_info->htt.action.tx_comp_req = 0;
1323 tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
1324 /* FIX THIS -
1325 * The FW currently has trouble using the host's fragments table
1326 * for management frames. Until this is fixed, rather than
1327 * specifying the fragment table to the FW, specify just the
1328 * address of the initial fragment.
1329 */
1330#if defined(HELIUMPLUS_PADDR64)
1331 /* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
1332 tx_desc); */
1333#endif /* defined(HELIUMPLUS_PADDR64) */
1334 if (tx_desc) {
1335 /*
1336 * Following the call to ol_tx_desc_ll, frag 0 is the
1337 * HTT tx HW descriptor, and the frame payload is in
1338 * frag 1.
1339 */
1340 htt_tx_desc_frags_table_set(
1341 pdev->htt_pdev,
1342 tx_desc->htt_tx_desc,
1343 qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
1344 0, 0);
1345#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
1346 dump_frag_desc(
1347 "after htt_tx_desc_frags_table_set",
1348 tx_desc);
1349#endif /* defined(HELIUMPLUS_PADDR64) */
1350 }
1351
1352 return tx_desc;
1353}
1354
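/**
 * ol_txrx_mgmt_send_frame() - send a management frame (LL systems)
 * @vdev: virtual device sending the frame
 * @tx_desc: tx desc
 * @tx_mgmt_frm: management frame to send
 * @tx_msdu_info: the tx meta data
 * @chanfreq: channel frequency on which to transmit the frame
 *
 * Tags the netbuf as a management packet and hands it to the non-standard
 * tx send path.
 *
 * Return: 0 - the frame is accepted for transmission
 */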
1355static inline
1356int ol_txrx_mgmt_send_frame(
1357 struct ol_txrx_vdev_t *vdev,
1358 struct ol_tx_desc_t *tx_desc,
1359 qdf_nbuf_t tx_mgmt_frm,
1360 struct ol_txrx_msdu_info_t *tx_msdu_info,
1361 uint16_t chanfreq)
1362{
1363 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1364 htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
1365 QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
1366 QDF_NBUF_TX_PKT_MGMT_TRACK;
1367 ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
1368 htt_pkt_type_mgmt);
1369
1370 return 0;
1371}
1372#endif
1373
1374/**
1375 * ol_tx_hl_base() - send tx frames for an HL system.
1376 * @vdev: the virtual device sending the data
1377 * @tx_spec: indicate what non-standard transmission actions to apply
1378 * @msdu_list: the tx frames to send
1379 * @tx_comp_req: tx completion req
1380 *
1381 * Return: NULL if all MSDUs are accepted, else the unaccepted MSDUs
1382 */
1383static inline qdf_nbuf_t
1384ol_tx_hl_base(
1385 ol_txrx_vdev_handle vdev,
1386 enum ol_tx_spec tx_spec,
1387 qdf_nbuf_t msdu_list,
1388 int tx_comp_req)
1389{
1390 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1391 qdf_nbuf_t msdu = msdu_list;
1392 struct ol_txrx_msdu_info_t tx_msdu_info;
1393 struct ocb_tx_ctrl_hdr_t tx_ctrl;
1394
1395 htt_pdev_handle htt_pdev = pdev->htt_pdev;
1396 tx_msdu_info.peer = NULL;
1397 tx_msdu_info.tso_info.is_tso = 0;
1398
1399 /*
1400 * The msdu_list variable could be used instead of the msdu var,
1401 * but just to clarify which operations are done on a single MSDU
1402 * vs. a list of MSDUs, use a distinct variable for single MSDUs
1403 * within the list.
1404 */
1405 while (msdu) {
1406 qdf_nbuf_t next;
1407 struct ol_tx_frms_queue_t *txq;
1408 struct ol_tx_desc_t *tx_desc = NULL;
1409
1410 qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));
1411
1412 /*
1413 * The netbuf will get stored into a (peer-TID) tx queue list
1414 * inside the ol_tx_classify_store function or else dropped,
1415 * so store the next pointer immediately.
1416 */
1417 next = qdf_nbuf_next(msdu);
1418
1419 tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);
1420
1421 if (!tx_desc) {
1422 /*
1423 * If we're out of tx descs, there's no need to try
1424 * to allocate tx descs for the remaining MSDUs.
1425 */
1426 TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
1427 msdu);
1428 return msdu; /* the list of unaccepted MSDUs */
1429 }
1430
1431 /* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu);*/
1432
1433 if (tx_spec != OL_TX_SPEC_STD) {
1434#if defined(FEATURE_WLAN_TDLS)
1435 if (tx_spec & OL_TX_SPEC_NO_FREE) {
1436 tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
1437 } else if (tx_spec & OL_TX_SPEC_TSO) {
1438#else
1439 if (tx_spec & OL_TX_SPEC_TSO) {
1440#endif
1441 tx_desc->pkt_type = OL_TX_FRM_TSO;
1442 }
1443 if (ol_txrx_tx_is_raw(tx_spec)) {
1444 /* CHECK THIS: does this need
1445 * to happen after htt_tx_desc_init?
1446 */
1447 /* different types of raw frames */
1448 u_int8_t sub_type =
1449 ol_txrx_tx_raw_subtype(
1450 tx_spec);
1451 htt_tx_desc_type(htt_pdev,
1452 tx_desc->htt_tx_desc,
1453 htt_pkt_type_raw,
1454 sub_type);
1455 }
1456 }
1457
1458 tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
1459 tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
1460 tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
1461 tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;
1462 tx_msdu_info.htt.action.tx_comp_req = tx_comp_req;
1463
1464 /* If the vdev is in OCB mode,
1465 * parse the tx control header.
1466 */
1467 if (vdev->opmode == wlan_op_mode_ocb) {
1468 if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
1469 /* There was an error parsing
1470 * the header.Skip this packet.
1471 */
1472 goto MSDU_LOOP_BOTTOM;
1473 }
1474 }
1475
1476 txq = ol_tx_classify(vdev, tx_desc, msdu,
1477 &tx_msdu_info);
1478
1479 if ((!txq) || TX_FILTER_CHECK(&tx_msdu_info)) {
1480 /* drop this frame,
1481 * but try sending subsequent frames
1482 */
1483 /*TXRX_STATS_MSDU_LIST_INCR(pdev,
1484 tx.dropped.no_txq,
1485 msdu);*/
1486 qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
1487 ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
1488 if (tx_msdu_info.peer) {
1489 /* remove the peer reference
1490 * added above */
1491 ol_txrx_peer_unref_delete(
1492 tx_msdu_info.peer);
1493 }
1494 goto MSDU_LOOP_BOTTOM;
1495 }
1496
1497 if (tx_msdu_info.peer) {
1498 /* If the state is not associated then drop all
1499 * the data packets received for that peer */
1500 if (tx_msdu_info.peer->state ==
1501 OL_TXRX_PEER_STATE_DISC) {
1502 qdf_atomic_inc(
1503 &pdev->tx_queue.rsrc_cnt);
1504 ol_tx_desc_frame_free_nonstd(pdev,
1505 tx_desc,
1506 1);
1507 ol_txrx_peer_unref_delete(
1508 tx_msdu_info.peer);
1509 msdu = next;
1510 continue;
1511 } else if (tx_msdu_info.peer->state !=
1512 OL_TXRX_PEER_STATE_AUTH) {
1513 if (tx_msdu_info.htt.info.ethertype !=
1514 ETHERTYPE_PAE &&
1515 tx_msdu_info.htt.info.ethertype
1516 != ETHERTYPE_WAI) {
1517 qdf_atomic_inc(
1518 &pdev->tx_queue.
1519 rsrc_cnt);
1520 ol_tx_desc_frame_free_nonstd(
1521 pdev,
1522 tx_desc, 1);
1523 ol_txrx_peer_unref_delete(
1524 tx_msdu_info.peer);
1525 msdu = next;
1526 continue;
1527 }
1528 }
1529 }
1530 /*
1531 * Initialize the HTT tx desc l2 header offset field.
1532 * htt_tx_desc_mpdu_header needs to be called to
1533 * make sure the l2 header size is initialized
1534 * correctly to handle cases where TX ENCAP is disabled
1535 * or Tx Encap fails to perform Encap
1536 */
1537 htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
1538
1539 /*
1540 * Note: when the driver is built without support for
1541 * SW tx encap, the following macro is a no-op.
1542 * When the driver is built with support for SW tx
1543 * encap, it performs encap, and if an error is
1544 * encountered, jumps to the MSDU_LOOP_BOTTOM label.
1545 */
1546 OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu,
1547 tx_msdu_info);
1548
1549 /* initialize the HW tx descriptor */
1550 htt_tx_desc_init(
1551 pdev->htt_pdev, tx_desc->htt_tx_desc,
1552 tx_desc->htt_tx_desc_paddr,
1553 ol_tx_desc_id(pdev, tx_desc),
1554 msdu,
1555 &tx_msdu_info.htt,
1556 &tx_msdu_info.tso_info,
1557 &tx_ctrl,
1558 vdev->opmode == wlan_op_mode_ocb);
1559 /*
1560 * If debug display is enabled, show the meta-data
1561 * being downloaded to the target via the
1562 * HTT tx descriptor.
1563 */
1564 htt_tx_desc_display(tx_desc->htt_tx_desc);
1565
1566 ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
1567 if (tx_msdu_info.peer) {
1568 OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer,
1569 msdu);
1570 /* remove the peer reference added above */
1571 ol_txrx_peer_unref_delete(tx_msdu_info.peer);
1572 }
1573MSDU_LOOP_BOTTOM:
1574 msdu = next;
1575 }
1576 ol_tx_sched(pdev);
1577 return NULL; /* all MSDUs were accepted */
1578}
1579
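/**
 * ol_tx_hl() - transmit a list of data frames on an HL system
 * @vdev: the virtual device sending the data
 * @msdu_list: the tx frames to send
 *
 * Uses the pdev's default tx completion request setting.
 *
 * Return: NULL if all MSDUs are accepted, else the unaccepted MSDUs
 */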
1580qdf_nbuf_t
1581ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
1582{
1583 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1584 int tx_comp_req = pdev->cfg.default_tx_comp_req;
1585 return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
1586}
1587
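/**
 * ol_tx_non_std_hl() - send a list of non-standard tx frames (HL systems)
 * @vdev: the virtual device sending the data
 * @tx_spec: indicate what non-standard transmission actions to apply
 * @msdu_list: the tx frames to send
 *
 * A tx completion is forced for OL_TX_SPEC_NO_FREE frames when a data tx
 * callback is registered, even if the default config does not request
 * completions, so the caller can be notified of the transmission.
 *
 * Return: NULL if all MSDUs are accepted, else the unaccepted MSDUs
 */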
1588qdf_nbuf_t
1589ol_tx_non_std_hl(ol_txrx_vdev_handle vdev,
1590 enum ol_tx_spec tx_spec,
1591 qdf_nbuf_t msdu_list)
1592{
1593 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1594 int tx_comp_req = pdev->cfg.default_tx_comp_req;
1595
1596 if (!tx_comp_req) {
1597 if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
1598 (pdev->tx_data_callback.func))
1599 tx_comp_req = 1;
1600 }
1601 return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req);
1602}
1603
Dhanashri Atreb08959a2016-03-01 17:28:03 -08001604/**
1605 * ol_tx_non_std - Allow the control-path SW to send data frames
1606 *
1607 * @vdev - which vdev should transmit the tx data frames
1608 * @tx_spec - what non-standard handling to apply to the tx data frames
1609 * @msdu_list - NULL-terminated list of tx MSDUs
1610 *
1611 * Generally, all tx data frames come from the OS shim into the txrx layer.
1612 * However, there are rare cases such as TDLS messaging where the UMAC
1613 * control-path SW creates tx data frames.
1614 * This UMAC SW can call this function to provide the tx data frames to
1615 * the txrx layer.
1616 * The UMAC SW can request a callback for these data frames after their
1617 * transmission completes, by using the ol_txrx_data_tx_cb_set function
1618 * to register a tx completion callback, and by specifying
1619 * OL_TX_SPEC_NO_FREE as the tx_spec arg when giving the frames to
1620 * ol_tx_non_std (an illustrative usage sketch follows the function below).
1621 * The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
1622 * as specified by ol_cfg_frame_type().
1623 *
1624 * Return: null - success, skb - failure
1625 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301626qdf_nbuf_t
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001627ol_tx_non_std(ol_txrx_vdev_handle vdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301628 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001629{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301630 if (vdev->pdev->cfg.is_high_latency)
1631 return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
1632 else
1633 return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001634}
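/*
 * Illustrative usage sketch only (not a definition from this file): a
 * control-path module sending a TDLS data frame and wanting a completion
 * notification might do roughly the following.  my_tdls_tx_complete_cb,
 * my_ctxt, tdls_frm and handle_unaccepted_frames are hypothetical names
 * used purely for this sketch.
 *
 *	qdf_nbuf_t rejected;
 *
 *	ol_txrx_data_tx_cb_set(vdev, my_tdls_tx_complete_cb, my_ctxt);
 *	rejected = ol_tx_non_std(vdev, OL_TX_SPEC_NO_FREE, tdls_frm);
 *	if (rejected)
 *		handle_unaccepted_frames(rejected);
 */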
1635
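/**
 * ol_txrx_data_tx_cb_set() - Store a tx completion callback for data frames
 * @vdev: which virtual device the callback applies to
 * @callback: function to invoke when a non-standard data tx completes
 * @ctxt: context to pass to the callback
 *
 * Used together with ol_tx_non_std() and the OL_TX_SPEC_NO_FREE tx_spec,
 * so the control-path SW that supplied the data frames is notified when
 * their transmission completes.
 */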
1636void
1637ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
1638 ol_txrx_data_tx_cb callback, void *ctxt)
1639{
1640 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1641 pdev->tx_data_callback.func = callback;
1642 pdev->tx_data_callback.ctxt = ctxt;
1643}
1644
Dhanashri Atre12a08392016-02-17 13:10:34 -08001645/**
1646 * ol_txrx_mgmt_tx_cb_set() - Store a callback for delivery
1647 * notifications for management frames.
1648 *
1649 * @pdev - the data physical device object
1650 * @type - the type of mgmt frame the callback is used for
1651 * @download_cb - the callback for notification of delivery to the target
1652 * @ota_ack_cb - the callback for notification of delivery to the peer
1653 * @ctxt - context to use with the callback
1654 *
1655 * When the txrx SW receives notifications from the target that a tx frame
1656 * has been delivered to its recipient, it will check if the tx frame
1657 * is a management frame. If so, the txrx SW will check the management
1658 * frame type specified when the frame was submitted for transmission.
1659 * If there is a callback function registered for the type of management
1660 * frame in question, the txrx code will invoke the callback to inform
1661 * the management + control SW that the mgmt frame was delivered.
1662 * This function is used by the control SW to store a callback pointer
1663 * for a given type of management frame.
1664 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001665void
1666ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
1667 uint8_t type,
1668 ol_txrx_mgmt_tx_cb download_cb,
1669 ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
1670{
1671 TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
1672 pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
1673 pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
1674 pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
1675}
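/*
 * Illustrative usage sketch only (not a definition from this file): the
 * control SW would typically register its callbacks once per mgmt frame
 * type before sending frames of that type.  my_download_cb, my_ota_ack_cb,
 * my_ctxt and MY_MGMT_TYPE are hypothetical names used purely for this
 * sketch; the type index just needs to be below OL_TXRX_MGMT_NUM_TYPES.
 *
 *	ol_txrx_mgmt_tx_cb_set(pdev, MY_MGMT_TYPE,
 *			       my_download_cb, my_ota_ack_cb, my_ctxt);
 */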
1676
1677#if defined(HELIUMPLUS_PADDR64)
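/**
 * dump_frag_desc() - dump the HTT fragment descriptor of a tx descriptor
 * @msg: label to print along with the dump
 * @tx_desc: the tx descriptor whose fragment descriptor is dumped
 *
 * Debug helper: prints the descriptor addresses, the TSO flag words, and
 * the per-fragment pointer entries of the fragment descriptor.
 */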
1678void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
1679{
1680 uint32_t *frag_ptr_i_p;
1681 int i;
1682
Anurag Chouhan6d760662016-02-20 16:05:43 +05301683 qdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
Leo Chang376398b2015-10-23 14:19:02 -07001684 tx_desc, tx_desc->id);
Anurag Chouhan6d760662016-02-20 16:05:43 +05301685 qdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%llx",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001686 tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
Anurag Chouhan6d760662016-02-20 16:05:43 +05301687 qdf_print("%s %d: Fragment Descriptor 0x%p (paddr=0x%llx)",
Houston Hoffman43d47fa2016-02-24 16:34:30 -08001688 __func__, __LINE__, tx_desc->htt_frag_desc, tx_desc->htt_frag_desc_paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001689
1690 /* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
1691 * is already dereferenceable (i.e. in virtual address space) */
1692 frag_ptr_i_p = tx_desc->htt_frag_desc;
1693
1694 /* Dump 6 words of TSO flags */
1695 print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags: ",
1696 DUMP_PREFIX_NONE, 8, 4,
1697 frag_ptr_i_p, 24, true);
1698
1699 frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */
1700
1701 i = 0;
1702 while (*frag_ptr_i_p) {
1703 print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr: ",
1704 DUMP_PREFIX_NONE, 8, 4,
1705 frag_ptr_i_p, 8, true);
1706 i++;
1707 if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
1708 break;
1709 else /* jump to next pointer - skip length */
1710 frag_ptr_i_p += 2;
1711 }
1712 return;
1713}
1714#endif /* HELIUMPLUS_PADDR64 */
1715
Dhanashri Atre12a08392016-02-17 13:10:34 -08001716/**
1717 * ol_txrx_mgmt_send_ext() - Transmit a management frame
1718 *
1719 * @vdev - virtual device transmitting the frame
1720 * @tx_mgmt_frm - management frame to transmit
1721 * @type - the type of management frame (determines what callback to use)
1722 * @use_6mbps - specify whether management frame to transmit should
1723 * use 6 Mbps rather than 1 Mbps min rate (for 5 GHz band or P2P)
1724 * @chanfreq - channel to transmit the frame on
1725 *
1726 * Send the specified management frame from the specified virtual device.
1727 * The type is used for determining whether to invoke a callback to inform
1728 * the sender that the tx mgmt frame was delivered, and if so, which
1729 * callback to use.
1730 *
1731 * Return: 0 - the frame is accepted for transmission
1732 * -EINVAL - the frame was not accepted
1733 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001734int
Dhanashri Atre12a08392016-02-17 13:10:34 -08001735ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301736 qdf_nbuf_t tx_mgmt_frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001737 uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
1738{
1739 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1740 struct ol_tx_desc_t *tx_desc;
1741 struct ol_txrx_msdu_info_t tx_msdu_info;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301742 int result = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001743 tx_msdu_info.tso_info.is_tso = 0;
1744
1745 tx_msdu_info.htt.action.use_6mbps = use_6mbps;
1746 tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
1747 tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
1748 tx_msdu_info.htt.action.do_tx_complete =
1749 pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;
1750
1751 /*
1752 * FIX THIS: l2_hdr_type should only specify L2 header type
1753 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
1754 * that is a combination of L2 header type and 802.11 frame type.
1755 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
1756 * But if the 802.11 frame type is "data", then the HTT pkt type is
1757 * the L2 header type (more or less): 802.3 vs. Native WiFi
1758 * (basic 802.11).
1759 * (Or the header type can be "raw", which is any version of the 802.11
1760 * header, and also implies that some of the offloaded tx data
1761 * processing steps may not apply.)
1762 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
1763 * l2_hdr_type field to program the HTT pkt type. Thus, this txrx SW
1764 * needs to overload the l2_hdr_type to indicate whether the frame is
1765 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
1766 * To fix this, the msdu_info's l2_hdr_type should be left specifying
1767 * just the L2 header type. For mgmt frames, there should be a
1768 * separate function to patch the HTT pkt type to store a "mgmt" value
1769 * rather than the L2 header type. Then the HTT pkt type can be
1770 * programmed efficiently for data frames, and the msdu_info's
1771 * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
1772 * frame type rather than the L2 header type.
1773 */
1774 /*
1775 * FIX THIS: remove duplication of htt_frm_type_mgmt and
1776 * htt_pkt_type_mgmt
1777 * The htt module expects a "enum htt_pkt_type" value.
1778 * The htt_dxe module expects a "enum htt_frm_type" value.
1779 * This needs to be cleaned up, so both versions of htt use a
1780 * consistent method of specifying the frame type.
1781 */
1782#ifdef QCA_SUPPORT_INTEGRATED_SOC
1783 /* tx mgmt frames always come with an 802.11 header */
1784 tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
1785 tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
1786#else
1787 tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
1788 tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
1789#endif
1790
1791 tx_msdu_info.peer = NULL;
1792
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301793 tx_desc = ol_txrx_mgmt_tx_desc_alloc(pdev, vdev, tx_mgmt_frm,
1794 &tx_msdu_info);
Nirav Shah2e583a02016-04-30 14:06:12 +05301795 if (!tx_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001796 return -EINVAL; /* can't accept the tx mgmt frame */
Nirav Shah2e583a02016-04-30 14:06:12 +05301797
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001798 TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
1799 TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
1800 tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
1801
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301802 result = ol_txrx_mgmt_send_frame(vdev, tx_desc, tx_mgmt_frm,
1803 &tx_msdu_info, chanfreq);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001804
1805 return 0; /* accepted the tx mgmt frame */
1806}
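/*
 * Illustrative usage sketch only (not a definition from this file): callers
 * typically pair ol_txrx_mgmt_send_ext() with ol_txrx_mgmt_tx_cb_set().
 * mgmt_nbuf, MY_MGMT_TYPE, use_6mbps, chanfreq and
 * handle_rejected_mgmt_frame are hypothetical names used purely for this
 * sketch.
 *
 *	if (ol_txrx_mgmt_send_ext(vdev, mgmt_nbuf, MY_MGMT_TYPE,
 *				  use_6mbps, chanfreq))
 *		handle_rejected_mgmt_frame(mgmt_nbuf);
 */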
1807
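/**
 * ol_txrx_sync() - send an HTT host-to-target sync message
 * @pdev: the data physical device
 * @sync_cnt: sync count value to place in the sync message
 */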
1808void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
1809{
1810 htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
1811}
1812
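/**
 * ol_tx_reinject() - resend a tx frame to a specific peer
 * @vdev: the virtual device sending the frame
 * @msdu: the frame to reinject
 * @peer_id: ID of the peer the frame is directed to
 *
 * The frame's HTT tx descriptor is marked as postponed and the frame is
 * sent again with the given peer ID.
 *
 * Return: NULL if the frame is accepted
 */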
Nirav Shahcbc6d722016-03-01 16:24:53 +05301813qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
1814 qdf_nbuf_t msdu, uint16_t peer_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001815{
1816 struct ol_tx_desc_t *tx_desc;
1817 struct ol_txrx_msdu_info_t msdu_info;
1818
1819 msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
1820 msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
1821 msdu_info.peer = NULL;
1822 msdu_info.htt.action.tx_comp_req = 0;
1823 msdu_info.tso_info.is_tso = 0;
1824
1825 ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
1826 HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
1827
1828 htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);
1829
Nirav Shah0d58a7e2016-04-26 22:54:12 +05301830 ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001831
1832 return NULL;
1833}
1834
1835#if defined(FEATURE_TSO)
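/**
 * ol_tso_seg_list_init() - allocate the TSO segment freelist
 * @pdev: the data physical device
 * @num_seg: number of TSO segment elements to pre-allocate
 *
 * Builds a singly linked freelist of num_seg qdf_tso_seg_elem_t elements
 * and creates the mutex that protects the pool.
 */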
1836void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
1837{
1838 int i;
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301839 struct qdf_tso_seg_elem_t *c_element;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001840
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301841 c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
Leo Chang376398b2015-10-23 14:19:02 -07001842 pdev->tso_seg_pool.freelist = c_element;
1843 for (i = 0; i < (num_seg - 1); i++) {
1844 c_element->next =
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301845 qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
Leo Chang376398b2015-10-23 14:19:02 -07001846 c_element = c_element->next;
1847 c_element->next = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001848 }
Leo Chang376398b2015-10-23 14:19:02 -07001849 pdev->tso_seg_pool.pool_size = num_seg;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301850 qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001851}
1852
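/**
 * ol_tso_seg_list_deinit() - free the TSO segment freelist
 * @pdev: the data physical device
 *
 * Walks the freelist, frees each element, resets the pool bookkeeping,
 * and destroys the pool mutex.
 */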
1853void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
1854{
Leo Chang376398b2015-10-23 14:19:02 -07001855 int i;
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301856 struct qdf_tso_seg_elem_t *c_element;
1857 struct qdf_tso_seg_elem_t *temp;
Leo Chang376398b2015-10-23 14:19:02 -07001858
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301859 qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
Leo Chang376398b2015-10-23 14:19:02 -07001860 c_element = pdev->tso_seg_pool.freelist;
1861 for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
1862 temp = c_element->next;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301863 qdf_mem_free(c_element);
Leo Chang376398b2015-10-23 14:19:02 -07001864 c_element = temp;
1865 if (!c_element)
1866 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001867 }
1868
1869 pdev->tso_seg_pool.freelist = NULL;
1870 pdev->tso_seg_pool.num_free = 0;
1871 pdev->tso_seg_pool.pool_size = 0;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301872 qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1873 qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001874}
1875#endif /* FEATURE_TSO */