/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_read, etc. */
#include <qdf_util.h>		/* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>		/* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>	/* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT1 */
#include <ol_tx_desc.h>		/* ol_tx_desc */
#include <ol_tx_send.h>		/* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_classify.h>	/* ol_tx_classify, ol_tx_classify_mgmt */
#include <ol_tx_queue.h>	/* ol_tx_enqueue */
#include <ol_tx_sched.h>	/* ol_tx_sched */


/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>	/* OL_TX_ENCAP, etc */
#include <ol_tx.h>

#ifdef WLAN_FEATURE_FASTPATH
#include <hif.h>		/* HIF_DEVICE */
#include <htc_api.h>		/* Layering violation, but required for fast path */
#include <htt_internal.h>
#include <htt_types.h>		/* htc_endpoint */
#include <cdp_txrx_peer_ops.h>

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len);
#endif /* WLAN_FEATURE_FASTPATH */

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool. Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
	do { \
		struct ol_txrx_pdev_t *pdev = vdev->pdev; \
		(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type; \
		tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info); \
		if (qdf_unlikely(!tx_desc)) { \
			TXRX_STATS_MSDU_LIST_INCR( \
				pdev, tx.dropped.host_reject, msdu); \
			return msdu; /* the list of unaccepted MSDUs */ \
		} \
	} while (0)
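
/*
 * Illustrative sketch (not part of the driver): ol_tx_prepare_ll() expands
 * inside its caller, so the "return msdu" on descriptor-allocation failure
 * returns from the *calling* function with the unaccepted MSDU list, e.g.:
 *
 *	while (msdu) {
 *		struct ol_tx_desc_t *tx_desc;
 *
 *		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
 *		... use tx_desc ...
 *	}
 *
 * ol_tx_ll() below is a real caller; the snippet only shows the shape.
 */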

#if defined(FEATURE_TSO)
/**
 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
 * related information in the msdu_info meta data
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: 0 - success, >0 - error
 */
static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
	qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
	msdu_info->tso_info.curr_seg = NULL;
	if (qdf_nbuf_is_tso(msdu)) {
		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
		msdu_info->tso_info.tso_seg_list = NULL;
		msdu_info->tso_info.num_segs = num_seg;
		while (num_seg) {
			struct qdf_tso_seg_elem_t *tso_seg =
				ol_tso_alloc_segment(vdev->pdev);
			if (tso_seg) {
				tso_seg->next =
					msdu_info->tso_info.tso_seg_list;
				msdu_info->tso_info.tso_seg_list
					= tso_seg;
				num_seg--;
			} else {
				struct qdf_tso_seg_elem_t *next_seg;
				struct qdf_tso_seg_elem_t *free_seg =
					msdu_info->tso_info.tso_seg_list;
				qdf_print("TSO seg alloc failed!\n");
				while (free_seg) {
					next_seg = free_seg->next;
					ol_tso_free_segment(vdev->pdev,
						free_seg);
					free_seg = next_seg;
				}
				return 1;
			}
		}
		qdf_nbuf_get_tso_info(vdev->pdev->osdev,
			msdu, &(msdu_info->tso_info));
		msdu_info->tso_info.curr_seg =
			msdu_info->tso_info.tso_seg_list;
		num_seg = msdu_info->tso_info.num_segs;
	} else {
		msdu_info->tso_info.is_tso = 0;
		msdu_info->tso_info.num_segs = 1;
	}
	return 0;
}
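
/*
 * Caller contract (sketch, mirroring ol_tx_ll() below): a non-zero return
 * means the TSO segment list could not be allocated and the MSDU should be
 * returned to the caller as rejected; on success tso_info.curr_seg points
 * at the head of tso_info.tso_seg_list and num_segs holds the segment
 * count (is_tso = 0, num_segs = 1 for a non-TSO frame), e.g.:
 *
 *	if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info)))
 *		return msdu;	// drop: TSO segment alloc failed
 */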
#endif

/**
 * ol_tx_data() - send data frame
 * @vdev: virtual device handle
 * @skb: skb
 *
 * Return: skb/NULL for success
 */
qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
{
	struct ol_txrx_pdev_t *pdev;
	qdf_nbuf_t ret;

	if (qdf_unlikely(!vdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s:vdev is null", __func__);
		return skb;
	} else {
		pdev = vdev->pdev;
	}

	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s:pdev is null", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_SEND(vdev, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
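
/*
 * Usage sketch (hypothetical caller, not part of this file): the OS shim
 * layer hands one skb at a time to ol_tx_data() and must treat a non-NULL
 * return value as "not consumed", e.g.:
 *
 *	qdf_nbuf_t rejected = ol_tx_data(vdev, skb);
 *	if (rejected)
 *		qdf_nbuf_free(rejected);	// or requeue; caller's choice
 */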

#ifdef IPA_OFFLOAD
/**
 * ol_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: vdev
 * @skb: skb
 *
 * Return: skb/NULL for success
 */
qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
			qdf_nbuf_t skb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	qdf_nbuf_t ret;

	if (qdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			"%s: pdev is NULL", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_SEND((struct ol_txrx_vdev_t *)vdev, skb);
	if (ret) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			"%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
#endif


#if defined(FEATURE_TSO)
qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;
		TXRX_STATS_TSO_HISTOGRAM(vdev->pdev, segments);
		TXRX_STATS_TSO_GSO_SIZE_UPDATE(vdev->pdev,
				qdf_nbuf_tcp_tso_size(msdu));
		TXRX_STATS_TSO_TOTAL_LEN_UPDATE(vdev->pdev,
				qdf_nbuf_len(msdu));
		TXRX_STATS_TSO_NUM_FRAGS_UPDATE(vdev->pdev,
				qdf_nbuf_get_nr_frags(msdu));


		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) =
					msdu_info.tso_info.curr_seg->
					seg.tso_frags[0].paddr;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

			TXRX_STATS_MSDU_INCR(vdev->pdev, tx.from_stack, msdu);

			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);

			ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);

			if (msdu_info.tso_info.curr_seg) {
				msdu_info.tso_info.curr_seg =
					msdu_info.tso_info.curr_seg->next;
			}

			qdf_nbuf_reset_num_frags(msdu);

			if (msdu_info.tso_info.is_tso) {
				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else /* TSO */

qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		TXRX_STATS_MSDU_INCR(vdev->pdev, tx.from_stack, msdu);

		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}
#endif /* TSO */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_prepare_ll_fast() - Allocate and prepare Tx descriptor
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information.
 *
 * @pdev: pointer to ol pdev handle
 * @vdev: pointer to ol vdev handle
 * @msdu: linked list of msdu packets
 * @pkt_download_len: packet download length
 * @ep_id: endpoint ID
 * @msdu_info: Handle to msdu_info
 *
 * Return: Pointer to Tx descriptor
 */
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
		      ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
		      uint32_t pkt_download_len, uint32_t ep_id,
		      struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	uint32_t *htt_tx_desc;
	void *htc_hdr_vaddr;
	u_int32_t num_frags, i;
	enum extension_header_type type;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (qdf_unlikely(!tx_desc))
		return NULL;

	tx_desc->netbuf = msdu;
	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = OL_TX_FRM_TSO;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
	} else {
		tx_desc->pkt_type = OL_TX_FRM_STD;
	}

	htt_tx_desc = tx_desc->htt_tx_desc;

	/* Make sure frags num is set to 0 */
	/*
	 * Do this here rather than in hardstart, so
	 * that we can hopefully take only one cache-miss while
	 * accessing skb->cb.
	 */

	/* HTT Header */
	/* TODO : Take care of multiple fragments */

	type = ol_tx_get_ext_header_type(vdev, msdu);

	/* TODO: Precompute and store paddr in ol_tx_desc_t */
	/* Virtual address of the HTT/HTC header, added by driver */
	htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
	htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
			 &msdu_info->htt, &msdu_info->tso_info,
			 NULL, type);

	num_frags = qdf_nbuf_get_num_frags(msdu);
	/* num_frags are expected to be 2 max */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUSPADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags-1);
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;

			frag_len = qdf_nbuf_get_frag_len(msdu, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
			if (type != EXT_HEADER_NOT_PRESENT) {
				frag_paddr +=
				    sizeof(struct htt_tx_msdu_desc_ext_t);
				frag_len -=
				    sizeof(struct htt_tx_msdu_desc_ext_t);
			}
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
					 i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i-1, frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUSPADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
					 i - 1, frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

	/*
	 * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
	 * this is not required. We still have to mark the swap bit correctly,
	 * when posting to the ring
	 */
	/* Check to make sure, data download length is correct */

	/*
	 * TODO : Can we remove this check and always download a fixed length ?
	 */


	if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
		pkt_download_len += sizeof(struct htt_tx_msdu_desc_ext_t);

	if (qdf_unlikely(qdf_nbuf_len(msdu) < pkt_download_len))
		pkt_download_len = qdf_nbuf_len(msdu);

	/* Fill the HTC header information */
	/*
	 * Passing 0 as the seq_no field, we can probably get away
	 * with it for the time being, since this is not checked in f/w
	 */
	/* TODO : Prefill this, look at multi-fragment case */
	HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);

	return tx_desc;
}
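
/*
 * Fragment layout assumed above (sketch): nbuf frag 0 carries the HTT/HTC
 * descriptor added by the driver, and the frame payload starts at nbuf
 * frag 1; the HTT fragment table therefore receives num_frags - 1 entries,
 * and payload frag i of the nbuf lands at table index i - 1:
 *
 *	nbuf frag 0 -> HTT/HTC descriptor (not in the HTT frag table)
 *	nbuf frag 1 -> HTT frag table entry 0 (frame payload)
 *	nbuf frag 2 -> HTT frag table entry 1 (optional extra fragment)
 */
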
#if defined(FEATURE_TSO)
/**
 * ol_tx_ll_fast() - Update metadata information and send msdu to HIF/CE
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Return: on success return NULL, pointer to nbuf when it fails to send.
 */
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;
		TXRX_STATS_TSO_HISTOGRAM(vdev->pdev, segments);
		TXRX_STATS_TSO_GSO_SIZE_UPDATE(vdev->pdev,
				qdf_nbuf_tcp_tso_size(msdu));
		TXRX_STATS_TSO_TOTAL_LEN_UPDATE(vdev->pdev,
				qdf_nbuf_len(msdu));
		TXRX_STATS_TSO_NUM_FRAGS_UPDATE(vdev->pdev,
				qdf_nbuf_get_nr_frags(msdu));

		/*
		 * The netbuf may get linked into a different list
		 * inside the ce_send_fast function, so store the next
		 * pointer before the ce_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
					curr_seg->seg.tso_frags[0].paddr;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
			msdu_info.htt.info.vdev_id = vdev->vdev_id;
			msdu_info.htt.action.cksum_offload =
				qdf_nbuf_get_tx_cksum(msdu);
			switch (qdf_nbuf_get_exemption_type(msdu)) {
			case QDF_NBUF_EXEMPT_NO_EXEMPTION:
			case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
				/* We want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 1;
				break;
			case QDF_NBUF_EXEMPT_ALWAYS:
				/* We don't want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 0;
				break;
			default:
				msdu_info.htt.action.do_encrypt = 1;
				qdf_assert(0);
				break;
			}

			tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
							pkt_download_len, ep_id,
							&msdu_info);

			TXRX_STATS_MSDU_INCR(pdev, tx.from_stack, msdu);

			if (qdf_likely(tx_desc)) {
				DPTRACE(qdf_dp_trace_ptr(msdu,
				    QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
				    qdf_nbuf_data_addr(msdu),
				    sizeof(qdf_nbuf_data(msdu)),
				    tx_desc->id, vdev->vdev_id));
				/*
				 * If debug display is enabled, show the meta
				 * data being downloaded to the target via the
				 * HTT tx descriptor.
				 */
				if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER
							(msdu))
					pkt_download_len +=
					  sizeof(struct htt_tx_msdu_desc_ext_t);

				htt_tx_desc_display(tx_desc->htt_tx_desc);
				if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
						ep_id, pkt_download_len))) {
					/*
					 * The packet could not be sent.
					 * Free the descriptor, return the
					 * packet to the caller.
					 */
					ol_tx_desc_free(pdev, tx_desc);
					return msdu;
				}
				if (msdu_info.tso_info.curr_seg) {
					msdu_info.tso_info.curr_seg =
						msdu_info.tso_info.curr_seg->next;
				}

				if (msdu_info.tso_info.is_tso) {
					qdf_nbuf_reset_num_frags(msdu);
					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
				}
			} else {
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				/* the list of unaccepted MSDUs */
				return msdu;
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
		msdu_info.htt.info.vdev_id = vdev->vdev_id;
		msdu_info.htt.action.cksum_offload =
			qdf_nbuf_get_tx_cksum(msdu);
		switch (qdf_nbuf_get_exemption_type(msdu)) {
		case QDF_NBUF_EXEMPT_NO_EXEMPTION:
		case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
			/* We want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 1;
			break;
		case QDF_NBUF_EXEMPT_ALWAYS:
			/* We don't want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 0;
			break;
		default:
			msdu_info.htt.action.do_encrypt = 1;
			qdf_assert(0);
			break;
		}

		tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
						pkt_download_len, ep_id,
						&msdu_info);

		TXRX_STATS_MSDU_INCR(pdev, tx.from_stack, msdu);

		if (qdf_likely(tx_desc)) {
			DPTRACE(qdf_dp_trace_ptr(msdu,
			    QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
			    qdf_nbuf_data_addr(msdu),
			    sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
			    vdev->vdev_id));
			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
				pkt_download_len +=
					sizeof(struct htt_tx_msdu_desc_ext_t);

			htt_tx_desc_display(tx_desc->htt_tx_desc);
			/*
			 * The netbuf may get linked into a different list
			 * inside the ce_send_fast function, so store the next
			 * pointer before the ce_send call.
			 */
			next = qdf_nbuf_next(msdu);
			if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
					       ep_id, pkt_download_len))) {
				/* The packet could not be sent */
				/* Free the descriptor, return the packet to the
				 * caller */
				ol_tx_desc_free(pdev, tx_desc);
				return msdu;
			}
			msdu = next;
		} else {
			TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}
	}

	return NULL; /* all MSDUs were accepted */
}
#endif /* FEATURE_TSO */
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_ll_wrapper() - wrapper to ol_tx_ll
 *
 */
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct hif_opaque_softc *hif_device =
		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_likely(hif_device && hif_is_fastpath_mode_enabled(hif_device)))
		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
	else
		msdu_list = ol_tx_ll(vdev, msdu_list);

	return msdu_list;
}
#else
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	return ol_tx_ll(vdev, msdu_list);
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL

#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
{
	int max_to_accept;

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason) {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		return;
	}

	/*
	 * Send as much of the backlog as possible, but leave some margin
	 * of unallocated tx descriptors that can be used for new frames
	 * being transmitted by other vdevs.
	 * Ideally there would be a scheduler, which would not only leave
	 * some margin for new frames for other vdevs, but also would
	 * fairly apportion the tx descriptors between multiple vdevs that
	 * have backlogs in their pause queues.
	 * However, the fairness benefit of having a scheduler for frames
	 * from multiple vdev's pause queues is not sufficient to outweigh
	 * the extra complexity.
	 */
	max_to_accept = vdev->pdev->tx_desc.num_free -
		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
		qdf_nbuf_t tx_msdu;
		max_to_accept--;
		vdev->ll_pause.txq.depth--;
		tx_msdu = vdev->ll_pause.txq.head;
		if (tx_msdu) {
			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
			if (NULL == vdev->ll_pause.txq.head)
				vdev->ll_pause.txq.tail = NULL;
			qdf_nbuf_set_next(tx_msdu, NULL);
			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
						QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
			/*
			 * It is unexpected that ol_tx_ll would reject the frame
			 * since we checked that there's room for it, though
			 * there's an infinitesimal possibility that between the
			 * time we checked the room available and now, a
			 * concurrent batch of tx frames used up all the room.
			 * For simplicity, just drop the frame.
			 */
			if (tx_msdu) {
				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
					       QDF_DMA_TO_DEVICE);
				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
			}
		}
	}
	if (vdev->ll_pause.txq.depth) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
			vdev->ll_pause.q_overflow_cnt++;
	}

	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

static qdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
			      qdf_nbuf_t msdu_list, uint8_t start_timer)
{
	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	while (msdu_list &&
	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
					QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
		DPTRACE(qdf_dp_trace(msdu_list,
				QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
				qdf_nbuf_data_addr(msdu_list),
				sizeof(qdf_nbuf_data(msdu_list)), QDF_TX));

		vdev->ll_pause.txq.depth++;
		if (!vdev->ll_pause.txq.head) {
			vdev->ll_pause.txq.head = msdu_list;
			vdev->ll_pause.txq.tail = msdu_list;
		} else {
			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
		}
		vdev->ll_pause.txq.tail = msdu_list;

		msdu_list = next;
	}
	if (vdev->ll_pause.txq.tail)
		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

	if (start_timer) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
	}
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return msdu_list;
}

/*
 * Store up the tx frame in the vdev's tx queue if the vdev is paused.
 * If there are too many frames in the tx queue, reject it.
 */
qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	uint16_t eth_type;
	uint32_t paused_reason;

	if (msdu_list == NULL)
		return NULL;

	paused_reason = vdev->ll_pause.paused_reason;
	if (paused_reason) {
		if (qdf_unlikely((paused_reason &
				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
				 paused_reason)) {
			eth_type = (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->
				    ethertype[0] << 8) |
				   (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->ethertype[1]);
			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
				return msdu_list;
			}
		}
		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
	} else {
		if (vdev->ll_pause.txq.depth > 0 ||
		    vdev->pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			/* not paused, but there is a backlog of frames
			   from a prior pause or throttle off phase */
			msdu_list = ol_tx_vdev_pause_queue_append(
				vdev, msdu_list, 0);
			/* if throttle is disabled or phase is "on",
			   send the frame */
			if (vdev->pdev->tx_throttle.current_throttle_level ==
			    THROTTLE_LEVEL_0 ||
			    vdev->pdev->tx_throttle.current_throttle_phase ==
			    THROTTLE_PHASE_ON) {
				/* send as many frames as possible
				   from the vdev's backlog */
				ol_tx_vdev_ll_pause_queue_send_base(vdev);
			}
		} else {
			/* not paused, no throttle and no backlog -
			   send the new frames */
			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
		}
	}
	return msdu_list;
}
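
/*
 * Decision flow of ol_tx_ll_queue() above, summarized (no new behavior):
 *
 *	paused?
 *	  yes: peer-unauthorized pause + EAPOL/WAPI frame -> send immediately
 *	       otherwise                                  -> enqueue + start timer
 *	  no:  backlog present or throttle level != 0     -> enqueue, then flush
 *	       via ol_tx_vdev_ll_pause_queue_send_base() if throttling is
 *	       disabled or the throttle phase is "on"
 *	       otherwise                                  -> send immediately
 */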

/*
 * Run through the transmit queues for all the vdevs and
 * send the pending frames
 */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
	int max_to_send;	/* tracks how many frames have been sent */
	qdf_nbuf_t tx_msdu;
	struct ol_txrx_vdev_t *vdev = NULL;
	uint8_t more;

	if (NULL == pdev)
		return;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;

	/* ensure that we send no more than tx_threshold frames at once */
	max_to_send = pdev->tx_throttle.tx_threshold;

	/* round robin through the vdev queues for the given pdev */

	/* Potential improvement: download several frames from the same vdev
	   at a time, since it is more likely that those frames could be
	   aggregated together; remember which vdev was serviced last,
	   so the next call to this function can resume the round-robin
	   traversing where the current invocation left off */
	do {
		more = 0;
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
			if (vdev->ll_pause.txq.depth) {
				if (vdev->ll_pause.paused_reason) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				tx_msdu = vdev->ll_pause.txq.head;
				if (NULL == tx_msdu) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				max_to_send--;
				vdev->ll_pause.txq.depth--;

				vdev->ll_pause.txq.head =
					qdf_nbuf_next(tx_msdu);

				if (NULL == vdev->ll_pause.txq.head)
					vdev->ll_pause.txq.tail = NULL;

				qdf_nbuf_set_next(tx_msdu, NULL);
				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
				/*
				 * It is unexpected that ol_tx_ll would reject
				 * the frame, since we checked that there's
				 * room for it, though there's an infinitesimal
				 * possibility that between the time we checked
				 * the room available and now, a concurrent
				 * batch of tx frames used up all the room.
				 * For simplicity, just drop the frame.
				 */
				if (tx_msdu) {
					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
						       QDF_DMA_TO_DEVICE);
					qdf_nbuf_tx_free(tx_msdu,
							 QDF_NBUF_PKT_ERROR);
				}
			}
			/* check if there are more msdus to transmit */
			if (vdev->ll_pause.txq.depth)
				more = 1;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} while (more && max_to_send);

	vdev = NULL;
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
		if (vdev->ll_pause.txq.depth) {
			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
			qdf_timer_start(
				&pdev->tx_throttle.tx_timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			return;
		}
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
}

void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;
	ol_tx_vdev_ll_pause_queue_send_base(vdev);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
{
	return
		tx_spec &
		(OL_TX_SPEC_RAW | OL_TX_SPEC_NO_AGGR | OL_TX_SPEC_NO_ENCRYPT);
}

static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
{
	uint8_t sub_type = 0x1; /* 802.11 MAC header present */

	if (tx_spec & OL_TX_SPEC_NO_AGGR)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
	if (tx_spec & OL_TX_SPEC_NO_ENCRYPT)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	return sub_type;
}
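
/*
 * Worked example (illustrative): for tx_spec = OL_TX_SPEC_RAW |
 * OL_TX_SPEC_NO_ENCRYPT, ol_txrx_tx_is_raw() is non-zero and
 * ol_txrx_tx_raw_subtype() returns
 *	0x1 | (0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S)
 * i.e. "802.11 MAC header present" plus the no-encrypt raw subtype bit.
 */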

qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
		 enum ol_tx_spec tx_spec,
		 qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);

		if (tx_spec != OL_TX_SPEC_STD) {
			if (tx_spec & OL_TX_SPEC_NO_FREE) {
				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
			} else if (tx_spec & OL_TX_SPEC_TSO) {
				tx_desc->pkt_type = OL_TX_FRM_TSO;
			} else if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
	do { \
		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
			if (tx_msdu_info.peer) { \
				/* remove the peer reference added above */ \
				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
			} \
			goto MSDU_LOOP_BOTTOM; \
		} \
	} while (0)
#else
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
#endif

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */

/**
 * parse_ocb_tx_header() - Check for an OCB TX control header on a packet
 * and extract it if one is present
 * @msdu: Pointer to OS packet (qdf_nbuf_t)
 * @tx_ctrl: Destination the TX control header is copied to, if present
 *
 * Return: true if ocb parsing is successful
 */
#define OCB_HEADER_VERSION 1
bool parse_ocb_tx_header(qdf_nbuf_t msdu,
			 struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	struct ether_header *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if TX control header is present */
	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do.. */
		return true;

	/* Remove the ethernet header */
	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		return false;
	}

	/* Remove the TX control header */
	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}


#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_TX_DESC_HI_PRIO_RESERVE)

/**
 * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
 * for a HL system.
 * @pdev: the data physical device sending the data
 * @vdev: the virtual device sending the data
 * @msdu: the tx frame
 * @msdu_info: the tx meta data
 *
 * Return: the tx descriptor
 */
static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t msdu,
	struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
	    TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
		tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
	} else if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
		if ((qdf_nbuf_is_ipv4_dhcp_pkt(msdu) == true) ||
		    (qdf_nbuf_is_ipv4_eapol_pkt(msdu) == true)) {
			tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				"Provided tx descriptor from reserve pool for DHCP/EAPOL\n");
		}
	}
	return tx_desc;
}
#else

static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t msdu,
	struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
	return tx_desc;
}
#endif
1240
1241#if defined(CONFIG_HL_SUPPORT)
1242
1243/**
1244 * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
1245 * for management frame
1246 * @pdev: the data physical device sending the data
1247 * @vdev: the virtual device sending the data
1248 * @tx_mgmt_frm: the tx managment frame
1249 * @tx_msdu_info: the tx meta data
1250 *
1251 * Return: the tx decriptor
1252 */
1253static inline
1254struct ol_tx_desc_t *
1255ol_txrx_mgmt_tx_desc_alloc(
1256 struct ol_txrx_pdev_t *pdev,
1257 struct ol_txrx_vdev_t *vdev,
1258 qdf_nbuf_t tx_mgmt_frm,
1259 struct ol_txrx_msdu_info_t *tx_msdu_info)
1260{
1261 struct ol_tx_desc_t *tx_desc;
1262 tx_msdu_info->htt.action.tx_comp_req = 1;
1263 tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
1264 return tx_desc;
1265}
1266
1267/**
1268 * ol_txrx_mgmt_send_frame() - send a management frame
1269 * @vdev: virtual device sending the frame
1270 * @tx_desc: tx desc
1271 * @tx_mgmt_frm: management frame to send
1272 * @tx_msdu_info: the tx meta data
1273 * @chanfreq: download change frequency
1274 *
1275 * Return:
1276 * 0 -> the frame is accepted for transmission, -OR-
1277 * 1 -> the frame was not accepted
1278 */
1279static inline
1280int ol_txrx_mgmt_send_frame(
1281 struct ol_txrx_vdev_t *vdev,
1282 struct ol_tx_desc_t *tx_desc,
1283 qdf_nbuf_t tx_mgmt_frm,
1284 struct ol_txrx_msdu_info_t *tx_msdu_info,
1285 uint16_t chanfreq)
1286{
1287 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1288 struct ol_tx_frms_queue_t *txq;
1289 /*
1290 * 1. Look up the peer and queue the frame in the peer's mgmt queue.
1291 * 2. Invoke the download scheduler.
1292 */
1293 txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
1294 if (!txq) {
1295 /*TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
1296 msdu);*/
1297 qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
1298 ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
1299 1 /* error */);
1300 if (tx_msdu_info->peer) {
1301 /* remove the peer reference added above */
1302 ol_txrx_peer_unref_delete(tx_msdu_info->peer);
1303 }
1304 return 1; /* can't accept the tx mgmt frame */
1305 }
1306 /* Initialize the HTT tx desc l2 header offset field.
1307 * Even though tx encap does not apply to mgmt frames,
1308 * htt_tx_desc_mpdu_header still needs to be called,
1309 * to specify that there was no L2 header added by tx encap,
1310 * so the frame's length does not need to be adjusted to account for
1311 * an added L2 header.
1312 */
1313 htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
1314 htt_tx_desc_init(
1315 pdev->htt_pdev, tx_desc->htt_tx_desc,
1316 tx_desc->htt_tx_desc_paddr,
1317 ol_tx_desc_id(pdev, tx_desc),
1318 tx_mgmt_frm,
1319 &tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0);
1320 htt_tx_desc_display(tx_desc->htt_tx_desc);
1321 htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
1322
1323 ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
1324 if (tx_msdu_info->peer) {
1325 /* remove the peer reference added above */
1326 ol_txrx_peer_unref_delete(tx_msdu_info->peer);
1327 }
1328 ol_tx_sched(vdev->pdev);
1329
1330 return 0;
1331}
1332
1333#else
1334
1335static inline
1336struct ol_tx_desc_t *
1337ol_txrx_mgmt_tx_desc_alloc(
1338 struct ol_txrx_pdev_t *pdev,
1339 struct ol_txrx_vdev_t *vdev,
1340 qdf_nbuf_t tx_mgmt_frm,
1341 struct ol_txrx_msdu_info_t *tx_msdu_info)
1342{
1343 struct ol_tx_desc_t *tx_desc;
1344 /* For LL tx_comp_req is not used so initialized to 0 */
1345 tx_msdu_info->htt.action.tx_comp_req = 0;
1346 tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
1347 /* FIX THIS -
1348 * The FW currently has trouble using the host's fragments table
1349 * for management frames. Until this is fixed, rather than
1350 * specifying the fragment table to the FW, specify just the
1351 * address of the initial fragment.
1352 */
1353#if defined(HELIUMPLUS_PADDR64)
1354 /* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
1355 tx_desc); */
1356#endif /* defined(HELIUMPLUS_PADDR64) */
1357 if (tx_desc) {
1358 /*
1359 * Following the call to ol_tx_desc_ll, frag 0 is the
1360 * HTT tx HW descriptor, and the frame payload is in
1361 * frag 1.
1362 */
1363 htt_tx_desc_frags_table_set(
1364 pdev->htt_pdev,
1365 tx_desc->htt_tx_desc,
1366 qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
1367 0, 0);
1368#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
1369 dump_frag_desc(
1370 "after htt_tx_desc_frags_table_set",
1371 tx_desc);
1372#endif /* defined(HELIUMPLUS_PADDR64) */
1373 }
1374
1375 return tx_desc;
1376}
1377
1378static inline
1379int ol_txrx_mgmt_send_frame(
1380 struct ol_txrx_vdev_t *vdev,
1381 struct ol_tx_desc_t *tx_desc,
1382 qdf_nbuf_t tx_mgmt_frm,
1383 struct ol_txrx_msdu_info_t *tx_msdu_info,
1384 uint16_t chanfreq)
1385{
1386 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1387 htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
1388 QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
1389 QDF_NBUF_TX_PKT_MGMT_TRACK;
1390 ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
1391 htt_pkt_type_mgmt);
1392
1393 return 0;
1394}
1395#endif
1396
1397/**
1398 * ol_tx_hl_base() - send tx frames for a HL system.
1399 * @vdev: the virtual device sending the data
1400 * @tx_spec: indicate what non-standard transmission actions to apply
1401 * @msdu_list: the tx frames to send
1402 * @tx_comp_req: whether a tx completion notification is requested
1403 *
1404 * Return: NULL if all MSDUs are accepted, or the list of unaccepted MSDUs
1405 */
1406static inline qdf_nbuf_t
1407ol_tx_hl_base(
1408 ol_txrx_vdev_handle vdev,
1409 enum ol_tx_spec tx_spec,
1410 qdf_nbuf_t msdu_list,
1411 int tx_comp_req)
1412{
1413 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1414 qdf_nbuf_t msdu = msdu_list;
1415 struct ol_txrx_msdu_info_t tx_msdu_info;
1416 struct ocb_tx_ctrl_hdr_t tx_ctrl;
1417
1418 htt_pdev_handle htt_pdev = pdev->htt_pdev;
1419 tx_msdu_info.peer = NULL;
1420 tx_msdu_info.tso_info.is_tso = 0;
1421
1422 /*
1423 * The msdu_list variable could be used instead of the msdu var,
1424 * but just to clarify which operations are done on a single MSDU
1425 * vs. a list of MSDUs, use a distinct variable for single MSDUs
1426 * within the list.
1427 */
1428 while (msdu) {
1429 qdf_nbuf_t next;
1430 struct ol_tx_frms_queue_t *txq;
1431 struct ol_tx_desc_t *tx_desc = NULL;
1432
1433 qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));
1434
1435 /*
1436 * The netbuf will get stored into a (peer-TID) tx queue list
1437 * inside the ol_tx_classify_store function or else dropped,
1438 * so store the next pointer immediately.
1439 */
1440 next = qdf_nbuf_next(msdu);
1441
1442 tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);
1443
1444 if (!tx_desc) {
1445 /*
1446 * If we're out of tx descs, there's no need to try
1447 * to allocate tx descs for the remaining MSDUs.
1448 */
1449 TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
1450 msdu);
1451 return msdu; /* the list of unaccepted MSDUs */
1452 }
1453
1454 /* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu);*/
1455
1456 if (tx_spec != OL_TX_SPEC_STD) {
1457#if defined(FEATURE_WLAN_TDLS)
1458 if (tx_spec & OL_TX_SPEC_NO_FREE) {
1459 tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
1460 } else if (tx_spec & OL_TX_SPEC_TSO) {
1461#else
1462 if (tx_spec & OL_TX_SPEC_TSO) {
1463#endif
1464 tx_desc->pkt_type = OL_TX_FRM_TSO;
1465 }
1466 if (ol_txrx_tx_is_raw(tx_spec)) {
1467 /* CHECK THIS: does this need
1468 * to happen after htt_tx_desc_init?
1469 */
1470 /* different types of raw frames */
1471 u_int8_t sub_type =
1472 ol_txrx_tx_raw_subtype(
1473 tx_spec);
1474 htt_tx_desc_type(htt_pdev,
1475 tx_desc->htt_tx_desc,
1476 htt_pkt_type_raw,
1477 sub_type);
1478 }
1479 }
1480
1481 tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
1482 tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
1483 tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
1484 tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;
1485 tx_msdu_info.htt.action.tx_comp_req = tx_comp_req;
1486
1487 /* If the vdev is in OCB mode,
1488 * parse the tx control header.
1489 */
1490 if (vdev->opmode == wlan_op_mode_ocb) {
1491 if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
1492 /* There was an error parsing
1493 * the header. Skip this packet.
1494 */
1495 goto MSDU_LOOP_BOTTOM;
1496 }
1497 }
1498
1499 txq = ol_tx_classify(vdev, tx_desc, msdu,
1500 &tx_msdu_info);
1501
1502 if ((!txq) || TX_FILTER_CHECK(&tx_msdu_info)) {
1503 /* drop this frame,
1504 * but try sending subsequent frames
1505 */
1506 /*TXRX_STATS_MSDU_LIST_INCR(pdev,
1507 tx.dropped.no_txq,
1508 msdu);*/
1509 qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
1510 ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
1511 if (tx_msdu_info.peer) {
1512 /* remove the peer reference
1513 * added above */
1514 ol_txrx_peer_unref_delete(
1515 tx_msdu_info.peer);
1516 }
1517 goto MSDU_LOOP_BOTTOM;
1518 }
1519
1520 if (tx_msdu_info.peer) {
1521 /* If the state is not associated then drop all
1522 * the data packets received for that peer */
1523 if (tx_msdu_info.peer->state ==
1524 OL_TXRX_PEER_STATE_DISC) {
1525 qdf_atomic_inc(
1526 &pdev->tx_queue.rsrc_cnt);
1527 ol_tx_desc_frame_free_nonstd(pdev,
1528 tx_desc,
1529 1);
1530 ol_txrx_peer_unref_delete(
1531 tx_msdu_info.peer);
1532 msdu = next;
1533 continue;
1534 } else if (tx_msdu_info.peer->state !=
1535 OL_TXRX_PEER_STATE_AUTH) {
1536 if (tx_msdu_info.htt.info.ethertype !=
1537 ETHERTYPE_PAE &&
1538 tx_msdu_info.htt.info.ethertype
1539 != ETHERTYPE_WAI) {
1540 qdf_atomic_inc(
1541 &pdev->tx_queue.
1542 rsrc_cnt);
1543 ol_tx_desc_frame_free_nonstd(
1544 pdev,
1545 tx_desc, 1);
1546 ol_txrx_peer_unref_delete(
1547 tx_msdu_info.peer);
1548 msdu = next;
1549 continue;
1550 }
1551 }
1552 }
1553 /*
1554 * Initialize the HTT tx desc l2 header offset field.
1555 * htt_tx_desc_mpdu_header needs to be called to
1556 * make sure the l2 header size is initialized
1557 * correctly, to handle cases where tx encap is disabled
1558 * or fails to perform encapsulation.
1559 */
1560 htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
1561
1562 /*
1563 * Note: when the driver is built without support for
1564 * SW tx encap, the following macro is a no-op.
1565 * When the driver is built with support for SW tx
1566 * encap, it performs encap, and if an error is
1567 * encountered, jumps to the MSDU_LOOP_BOTTOM label.
1568 */
1569 OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu,
1570 tx_msdu_info);
1571
1572 /* initialize the HW tx descriptor */
1573 htt_tx_desc_init(
1574 pdev->htt_pdev, tx_desc->htt_tx_desc,
1575 tx_desc->htt_tx_desc_paddr,
1576 ol_tx_desc_id(pdev, tx_desc),
1577 msdu,
1578 &tx_msdu_info.htt,
1579 &tx_msdu_info.tso_info,
1580 &tx_ctrl,
1581 vdev->opmode == wlan_op_mode_ocb);
1582 /*
1583 * If debug display is enabled, show the meta-data
1584 * being downloaded to the target via the
1585 * HTT tx descriptor.
1586 */
1587 htt_tx_desc_display(tx_desc->htt_tx_desc);
1588
1589 ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
1590 if (tx_msdu_info.peer) {
1591 OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer,
1592 msdu);
1593 /* remove the peer reference added above */
1594 ol_txrx_peer_unref_delete(tx_msdu_info.peer);
1595 }
1596MSDU_LOOP_BOTTOM:
1597 msdu = next;
1598 }
1599 ol_tx_sched(pdev);
1600 return NULL; /* all MSDUs were accepted */
1601}
1602
1603qdf_nbuf_t
1604ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
1605{
1606 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1607 int tx_comp_req = pdev->cfg.default_tx_comp_req;
1608 return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
1609}
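
/*
 * Illustrative sketch (not compiled): how a caller might handle the
 * return value of ol_tx_hl().  Any MSDUs returned were not accepted by
 * the txrx layer, so the caller still owns them and must free (or
 * requeue) them itself.  The function name example_hl_send() and the
 * drop-on-rejection policy below are assumptions for illustration only.
 */
#if 0
static void example_hl_send(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t rejected = ol_tx_hl(vdev, msdu_list);

	while (rejected) {
		qdf_nbuf_t next = qdf_nbuf_next(rejected);

		/* txrx did not take ownership of this frame; drop it */
		qdf_nbuf_free(rejected);
		rejected = next;
	}
}
#endif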
1610
1611qdf_nbuf_t
1612ol_tx_non_std_hl(ol_txrx_vdev_handle vdev,
1613 enum ol_tx_spec tx_spec,
1614 qdf_nbuf_t msdu_list)
1615{
1616 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1617 int tx_comp_req = pdev->cfg.default_tx_comp_req;
1618
1619 if (!tx_comp_req) {
1620 if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
1621 (pdev->tx_data_callback.func))
1622 tx_comp_req = 1;
1623 }
1624 return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req);
1625}
1626
Dhanashri Atreb08959a2016-03-01 17:28:03 -08001627/**
1628 * ol_tx_non_std - Allow the control-path SW to send data frames
1629 *
1630 * @data_vdev - which vdev should transmit the tx data frames
1631 * @tx_spec - what non-standard handling to apply to the tx data frames
1632 * @msdu_list - NULL-terminated list of tx MSDUs
1633 *
1634 * Generally, all tx data frames come from the OS shim into the txrx layer.
1635 * However, there are rare cases such as TDLS messaging where the UMAC
1636 * control-path SW creates tx data frames.
1637 * This UMAC SW can call this function to provide the tx data frames to
1638 * the txrx layer.
1639 * The UMAC SW can request a callback for these data frames after their
1640 * transmission completes, by using the ol_txrx_data_tx_cb_set function
1641 * to register a tx completion callback, and by specifying
1642 * ol_tx_spec_no_free as the tx_spec arg when giving the frames to
1643 * ol_tx_non_std.
1644 * The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
1645 * as specified by ol_cfg_frame_type().
1646 *
1647 * Return: NULL on success; the unaccepted MSDU list (skb) on failure
1648 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301649qdf_nbuf_t
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001650ol_tx_non_std(ol_txrx_vdev_handle vdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301651 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001652{
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301653 if (vdev->pdev->cfg.is_high_latency)
1654 return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
1655 else
1656 return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001657}
1658
1659void
1660ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
1661 ol_txrx_data_tx_cb callback, void *ctxt)
1662{
1663 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1664 pdev->tx_data_callback.func = callback;
1665 pdev->tx_data_callback.ctxt = ctxt;
1666}
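
/*
 * Illustrative sketch (not compiled) of the flow described for
 * ol_tx_non_std() above: control-path SW registers a tx completion
 * callback and then hands a self-generated data frame to txrx with
 * OL_TX_SPEC_NO_FREE so it gets the frame back after transmission.
 * example_data_tx_done() and example_send_ctrl_data() are hypothetical
 * names, and the callback prototype is assumed to match
 * ol_txrx_data_tx_cb (ctxt, frame, had_error).
 */
#if 0
static void example_data_tx_done(void *ctxt, qdf_nbuf_t tx_frm, int had_error)
{
	/* txrx did not free the frame (OL_TX_SPEC_NO_FREE); do it here */
	qdf_nbuf_free(tx_frm);
}

static void example_send_ctrl_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t frm)
{
	/* register the completion callback once, e.g. at vdev setup time */
	ol_txrx_data_tx_cb_set(vdev, example_data_tx_done, NULL);

	/* a non-NULL return is the list of frames txrx could not accept */
	if (ol_tx_non_std(vdev, OL_TX_SPEC_NO_FREE, frm))
		qdf_nbuf_free(frm);
}
#endif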
1667
Dhanashri Atre12a08392016-02-17 13:10:34 -08001668/**
1669 * ol_txrx_mgmt_tx_cb_set() - Store a callback for delivery
1670 * notifications for management frames.
1671 *
1672 * @pdev - the data physical device object
1673 * @type - the type of mgmt frame the callback is used for
1674 * @download_cb - the callback for notification of delivery to the target
1675 * @ota_ack_cb - the callback for notification of delivery to the peer
1676 * @ctxt - context to use with the callback
1677 *
1678 * When the txrx SW receives notifications from the target that a tx frame
1679 * has been delivered to its recipient, it will check if the tx frame
1680 * is a management frame. If so, the txrx SW will check the management
1681 * frame type specified when the frame was submitted for transmission.
1682 * If there is a callback function registered for the type of management
1683 * frame in question, the txrx code will invoke the callback to inform
1684 * the management + control SW that the mgmt frame was delivered.
1685 * This function is used by the control SW to store a callback pointer
1686 * for a given type of management frame.
1687 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001688void
1689ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
1690 uint8_t type,
1691 ol_txrx_mgmt_tx_cb download_cb,
1692 ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
1693{
1694 TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
1695 pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
1696 pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
1697 pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
1698}
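
/*
 * Illustrative sketch (not compiled): registering delivery callbacks for
 * one management frame type, as described above.  The type index 0,
 * example_mgmt_download_done() and example_mgmt_ota_ack() are
 * hypothetical, and the callback prototype is assumed to match
 * ol_txrx_mgmt_tx_cb (ctxt, frame, had_error).  Frame ownership on
 * completion follows the driver's mgmt completion rules and is not
 * shown here.
 */
#if 0
static void example_mgmt_download_done(void *ctxt, qdf_nbuf_t frm, int err)
{
	/* the frame has been downloaded to the target (not yet sent OTA) */
}

static void example_mgmt_ota_ack(void *ctxt, qdf_nbuf_t frm, int err)
{
	/* the target reported over-the-air completion for the frame */
}

static void example_register_mgmt_cbs(ol_txrx_pdev_handle pdev, void *ctxt)
{
	ol_txrx_mgmt_tx_cb_set(pdev, 0 /* type, < OL_TXRX_MGMT_NUM_TYPES */,
			       example_mgmt_download_done,
			       example_mgmt_ota_ack, ctxt);
}
#endif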
1699
1700#if defined(HELIUMPLUS_PADDR64)
1701void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
1702{
1703 uint32_t *frag_ptr_i_p;
1704 int i;
1705
Anurag Chouhan6d760662016-02-20 16:05:43 +05301706 qdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
Leo Chang376398b2015-10-23 14:19:02 -07001707 tx_desc, tx_desc->id);
Hardik Kantilal Patele9b97ea2016-07-14 17:26:13 +05301708 qdf_print("HTT TX Descriptor vaddr: 0x%p paddr: %pad",
1709 tx_desc->htt_tx_desc, &tx_desc->htt_tx_desc_paddr);
1710 qdf_print("%s %d: Fragment Descriptor 0x%p (paddr=%pad)",
1711 __func__, __LINE__, tx_desc->htt_frag_desc,
1712 &tx_desc->htt_frag_desc_paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001713
1714 /* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
1715 is already dereferenceable (i.e. in the virtual address space) */
1716 frag_ptr_i_p = tx_desc->htt_frag_desc;
1717
1718 /* Dump 6 words of TSO flags */
1719 print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags: ",
1720 DUMP_PREFIX_NONE, 8, 4,
1721 frag_ptr_i_p, 24, true);
1722
1723 frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */
1724
1725 i = 0;
1726 while (*frag_ptr_i_p) {
1727 print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr: ",
1728 DUMP_PREFIX_NONE, 8, 4,
1729 frag_ptr_i_p, 8, true);
1730 i++;
1731 if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
1732 break;
1733 else /* jump to next pointer - skip length */
1734 frag_ptr_i_p += 2;
1735 }
1736 return;
1737}
1738#endif /* HELIUMPLUS_PADDR64 */
1739
Dhanashri Atre12a08392016-02-17 13:10:34 -08001740/**
1741 * ol_txrx_mgmt_send_ext() - Transmit a management frame
1742 *
1743 * @vdev - virtual device transmitting the frame
1744 * @tx_mgmt_frm - management frame to transmit
1745 * @type - the type of management frame (determines what callback to use)
1746 * @use_6mbps - specify whether the management frame to transmit should
1747 * use 6 Mbps rather than the 1 Mbps min rate (for 5 GHz band or P2P)
1748 * @chanfreq - channel to transmit the frame on
1749 *
1750 * Send the specified management frame from the specified virtual device.
1751 * The type is used for determining whether to invoke a callback to inform
1752 * the sender that the tx mgmt frame was delivered, and if so, which
1753 * callback to use.
1754 *
1755 * Return: 0 - the frame is accepted for transmission
1756 * 1 - the frame was not accepted
1757 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001758int
Dhanashri Atre12a08392016-02-17 13:10:34 -08001759ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301760 qdf_nbuf_t tx_mgmt_frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001761 uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
1762{
1763 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1764 struct ol_tx_desc_t *tx_desc;
1765 struct ol_txrx_msdu_info_t tx_msdu_info;
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301766 int result = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001767 tx_msdu_info.tso_info.is_tso = 0;
1768
1769 tx_msdu_info.htt.action.use_6mbps = use_6mbps;
1770 tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
1771 tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
1772 tx_msdu_info.htt.action.do_tx_complete =
1773 pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;
1774
1775 /*
1776 * FIX THIS: l2_hdr_type should only specify L2 header type
1777 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
1778 * that is a combination of L2 header type and 802.11 frame type.
1779 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
1780 * But if the 802.11 frame type is "data", then the HTT pkt type is
1781 * the L2 header type (more or less): 802.3 vs. Native WiFi
1782 * (basic 802.11).
1783 * (Or the header type can be "raw", which is any version of the 802.11
1784 * header, and also implies that some of the offloaded tx data
1785 * processing steps may not apply.)
1786 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
1787 * l2_hdr_type field to program the HTT pkt type. Thus, this txrx SW
1788 * needs to overload the l2_hdr_type to indicate whether the frame is
1789 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
1790 * To fix this, the msdu_info's l2_hdr_type should be left specifying
1791 * just the L2 header type. For mgmt frames, there should be a
1792 * separate function to patch the HTT pkt type to store a "mgmt" value
1793 * rather than the L2 header type. Then the HTT pkt type can be
1794 * programmed efficiently for data frames, and the msdu_info's
1795 * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
1796 * frame type rather than the L2 header type.
1797 */
1798 /*
1799 * FIX THIS: remove duplication of htt_frm_type_mgmt and
1800 * htt_pkt_type_mgmt
1801 * The htt module expects a "enum htt_pkt_type" value.
1802 * The htt_dxe module expects a "enum htt_frm_type" value.
1803 * This needs to be cleaned up, so both versions of htt use a
1804 * consistent method of specifying the frame type.
1805 */
1806#ifdef QCA_SUPPORT_INTEGRATED_SOC
1807 /* tx mgmt frames always come with a 802.11 header */
1808 tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
1809 tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
1810#else
1811 tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
1812 tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
1813#endif
1814
1815 tx_msdu_info.peer = NULL;
1816
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301817 tx_desc = ol_txrx_mgmt_tx_desc_alloc(pdev, vdev, tx_mgmt_frm,
1818 &tx_msdu_info);
Nirav Shah2e583a02016-04-30 14:06:12 +05301819 if (!tx_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001820 return -EINVAL; /* can't accept the tx mgmt frame */
Nirav Shah2e583a02016-04-30 14:06:12 +05301821
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001822 TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
1823 TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
1824 tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
1825
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301826 result = ol_txrx_mgmt_send_frame(vdev, tx_desc, tx_mgmt_frm,
1827 &tx_msdu_info, chanfreq);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001828
1829 return 0; /* accepted the tx mgmt frame */
1830}
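
/*
 * Illustrative sketch (not compiled): handing a management frame to
 * ol_txrx_mgmt_send_ext() as described above.  example_send_mgmt() is a
 * hypothetical caller, the type index 0 is assumed to have callbacks
 * registered via ol_txrx_mgmt_tx_cb_set(), and 2412 is just a sample
 * 2.4 GHz channel frequency in MHz.
 */
#if 0
static int example_send_mgmt(ol_txrx_vdev_handle vdev, qdf_nbuf_t mgmt_frm)
{
	/* type selects which registered mgmt tx callbacks get invoked */
	if (ol_txrx_mgmt_send_ext(vdev, mgmt_frm,
				  0 /* type */, 0 /* use_6mbps */,
				  2412 /* chanfreq */)) {
		/* not accepted; the frame was not consumed by txrx */
		return -1;
	}
	return 0;	/* accepted for transmission */
}
#endif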
1831
1832void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
1833{
1834 htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
1835}
1836
Nirav Shahcbc6d722016-03-01 16:24:53 +05301837qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
1838 qdf_nbuf_t msdu, uint16_t peer_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001839{
1840 struct ol_tx_desc_t *tx_desc;
1841 struct ol_txrx_msdu_info_t msdu_info;
1842
1843 msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
1844 msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
1845 msdu_info.peer = NULL;
1846 msdu_info.htt.action.tx_comp_req = 0;
1847 msdu_info.tso_info.is_tso = 0;
1848
1849 ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
1850 HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
1851
1852 htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);
1853
Nirav Shah0d58a7e2016-04-26 22:54:12 +05301854 ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001855
1856 return NULL;
1857}
1858
1859#if defined(FEATURE_TSO)
1860void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
1861{
1862 int i;
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301863 struct qdf_tso_seg_elem_t *c_element;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001864
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301865 c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
Leo Chang376398b2015-10-23 14:19:02 -07001866 pdev->tso_seg_pool.freelist = c_element;
1867 for (i = 0; i < (num_seg - 1); i++) {
1868 c_element->next =
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301869 qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
Leo Chang376398b2015-10-23 14:19:02 -07001870 c_element = c_element->next;
1871 c_element->next = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001872 }
Leo Chang376398b2015-10-23 14:19:02 -07001873 pdev->tso_seg_pool.pool_size = num_seg;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301874 qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001875}
1876
1877void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
1878{
Leo Chang376398b2015-10-23 14:19:02 -07001879 int i;
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301880 struct qdf_tso_seg_elem_t *c_element;
1881 struct qdf_tso_seg_elem_t *temp;
Leo Chang376398b2015-10-23 14:19:02 -07001882
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301883 qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
Leo Chang376398b2015-10-23 14:19:02 -07001884 c_element = pdev->tso_seg_pool.freelist;
1885 for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
1886 temp = c_element->next;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301887 qdf_mem_free(c_element);
Leo Chang376398b2015-10-23 14:19:02 -07001888 c_element = temp;
1889 if (!c_element)
1890 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001891 }
1892
1893 pdev->tso_seg_pool.freelist = NULL;
1894 pdev->tso_seg_pool.num_free = 0;
1895 pdev->tso_seg_pool.pool_size = 0;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301896 qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1897 qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001898}
1899#endif /* FEATURE_TSO */
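
/*
 * Illustrative sketch (not compiled, FEATURE_TSO builds only): the
 * expected pairing of the TSO segment pool init/deinit calls around the
 * pdev lifetime.  example_pdev_attach_tso()/example_pdev_detach_tso()
 * and the pool size of 64 segments are assumptions for illustration.
 */
#if 0
static void example_pdev_attach_tso(struct ol_txrx_pdev_t *pdev)
{
	/* carve out a free list of TSO segment elements up front */
	ol_tso_seg_list_init(pdev, 64);
}

static void example_pdev_detach_tso(struct ol_txrx_pdev_t *pdev)
{
	/* release every element remaining on the free list */
	ol_tso_seg_list_deinit(pdev);
}
#endif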