/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
#include <ol_tx_desc.h>         /* ol_tx_desc */
#include <ol_tx_send.h>         /* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>        /* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
#include <ol_tx.h>

#ifdef WLAN_FEATURE_FASTPATH
#include <hif.h>                /* HIF_DEVICE */
#include <htc_api.h>    /* Layering violation, but required for fast path */
#include <htt_internal.h>
#include <htt_types.h>          /* htc_endpoint */
#include <cdp_txrx_peer_ops.h>

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
	unsigned int transfer_id, uint32_t download_len);
#endif /* WLAN_FEATURE_FASTPATH */

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool. Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
	do {								\
		struct ol_txrx_pdev_t *pdev = vdev->pdev;		\
		(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;	\
		tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);	\
		if (qdf_unlikely(!tx_desc)) {				\
			TXRX_STATS_MSDU_LIST_INCR(			\
				pdev, tx.dropped.host_reject, msdu);	\
			return msdu; /* the list of unaccepted MSDUs */	\
		}							\
	} while (0)

#if defined(FEATURE_TSO)
/**
 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
 * related information in the msdu_info meta data
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: 0 - success, >0 - error
 */
static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
	qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
	msdu_info->tso_info.curr_seg = NULL;
	if (qdf_nbuf_is_tso(msdu)) {
		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
		msdu_info->tso_info.tso_seg_list = NULL;
		msdu_info->tso_info.num_segs = num_seg;
		while (num_seg) {
			struct qdf_tso_seg_elem_t *tso_seg =
				ol_tso_alloc_segment(vdev->pdev);
			if (tso_seg) {
				tso_seg->next =
					msdu_info->tso_info.tso_seg_list;
				msdu_info->tso_info.tso_seg_list
					= tso_seg;
				num_seg--;
			} else {
				struct qdf_tso_seg_elem_t *next_seg;
				struct qdf_tso_seg_elem_t *free_seg =
					msdu_info->tso_info.tso_seg_list;
				qdf_print("TSO seg alloc failed!\n");
				while (free_seg) {
					next_seg = free_seg->next;
					ol_tso_free_segment(vdev->pdev,
						free_seg);
					free_seg = next_seg;
				}
				return 1;
			}
		}
		qdf_nbuf_get_tso_info(vdev->pdev->osdev,
			msdu, &(msdu_info->tso_info));
		msdu_info->tso_info.curr_seg =
			msdu_info->tso_info.tso_seg_list;
		num_seg = msdu_info->tso_info.num_segs;
	} else {
		msdu_info->tso_info.is_tso = 0;
		msdu_info->tso_info.num_segs = 1;
	}
	return 0;
}
#endif

/**
 * ol_tx_data() - send data frame
 * @vdev: virtual device handle
 * @skb: skb
 *
 * Return: skb/NULL for success
 */
qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
{
	struct ol_txrx_pdev_t *pdev;
	qdf_nbuf_t ret;

	if (qdf_unlikely(!vdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s:vdev is null", __func__);
		return skb;
	} else {
		pdev = vdev->pdev;
	}

	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s:pdev is null", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL(vdev, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			"%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
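
/*
 * Illustrative usage sketch (variable names are hypothetical): the OS shim
 * hands ol_tx_data() a single skb from its transmit path and must treat a
 * non-NULL return value as the unaccepted frame, e.g.
 *
 *	qdf_nbuf_t rejected = ol_tx_data(txrx_vdev, skb);
 *
 *	if (rejected)
 *		qdf_nbuf_free(rejected);
 */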

#ifdef IPA_OFFLOAD
/**
 * ol_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: vdev
 * @skb: skb
 *
 * Return: skb/NULL for success
 */
qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
			qdf_nbuf_t skb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	qdf_nbuf_t ret;

	if (qdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			"%s: pdev is NULL", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
		&& (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
		&& (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
	if (ret) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			"%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
#endif


#if defined(FEATURE_TSO)
qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) =
					msdu_info.tso_info.curr_seg->
					seg.tso_frags[0].paddr;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);

			ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);

			if (msdu_info.tso_info.curr_seg) {
				msdu_info.tso_info.curr_seg =
					msdu_info.tso_info.curr_seg->next;
			}

			qdf_nbuf_reset_num_frags(msdu);

			if (msdu_info.tso_info.is_tso) {
				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else /* TSO */

qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}
#endif /* TSO */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_prepare_ll_fast() Alloc and prepare Tx descriptor
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information.
 *
 * @pdev: pointer to ol pdev handle
 * @vdev: pointer to ol vdev handle
 * @msdu: linked list of msdu packets
 * @pkt_download_len: packet download length
 * @ep_id: endpoint ID
 * @msdu_info: Handle to msdu_info
 *
 * Return: Pointer to Tx descriptor
 */
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
		      ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
		      uint32_t pkt_download_len, uint32_t ep_id,
		      struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	uint32_t *htt_tx_desc;
	void *htc_hdr_vaddr;
	u_int32_t num_frags, i;
	enum extension_header_type type;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (qdf_unlikely(!tx_desc))
		return NULL;

	tx_desc->netbuf = msdu;
	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = OL_TX_FRM_TSO;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
	} else {
		tx_desc->pkt_type = OL_TX_FRM_STD;
	}

	htt_tx_desc = tx_desc->htt_tx_desc;

	/* Make sure frags num is set to 0 */
	/*
	 * Do this here rather than in hardstart, so
	 * that we can hopefully take only one cache-miss while
	 * accessing skb->cb.
	 */

	/* HTT Header */
	/* TODO : Take care of multiple fragments */

	type = ol_tx_get_ext_header_type(vdev, msdu);

	/* TODO: Precompute and store paddr in ol_tx_desc_t */
	/* Virtual address of the HTT/HTC header, added by driver */
	htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
	htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
			 &msdu_info->htt, &msdu_info->tso_info,
			 NULL, type);

	num_frags = qdf_nbuf_get_num_frags(msdu);
	/* num_frags are expected to be 2 max */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;

			frag_len = qdf_nbuf_get_frag_len(msdu, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
			if (type != EXT_HEADER_NOT_PRESENT) {
				frag_paddr +=
					sizeof(struct htt_tx_msdu_desc_ext_t);
				frag_len -=
					sizeof(struct htt_tx_msdu_desc_ext_t);
			}
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev,
					 tx_desc->htt_frag_desc,
					 i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i - 1, frag_paddr, frag_len);
			dump_pkt(msdu, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
					 i - 1, frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

	/*
	 * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
	 * this is not required. We still have to mark the swap bit correctly,
	 * when posting to the ring
	 */
	/* Check to make sure, data download length is correct */

	/*
	 * TODO : Can we remove this check and always download a fixed length ?
	 */

	if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
		pkt_download_len += sizeof(struct htt_tx_msdu_desc_ext_t);

	if (qdf_unlikely(qdf_nbuf_len(msdu) < pkt_download_len))
		pkt_download_len = qdf_nbuf_len(msdu);

	/* Fill the HTC header information */
	/*
	 * Passing 0 as the seq_no field, we can probably get away
	 * with it for the time being, since this is not checked in f/w
	 */
	/* TODO : Prefill this, look at multi-fragment case */
	HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);

	return tx_desc;
}
#if defined(FEATURE_TSO)
/**
 * ol_tx_ll_fast() Update metadata information and send msdu to HIF/CE
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Return: on success return NULL, pointer to nbuf when it fails to send.
 */
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
				tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list
		 * inside the ce_send_fast function, so store the next
		 * pointer before the ce_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
					curr_seg->seg.tso_frags[0].paddr;

			segments--;

			/**
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
			msdu_info.htt.info.vdev_id = vdev->vdev_id;
			msdu_info.htt.action.cksum_offload =
				qdf_nbuf_get_tx_cksum(msdu);
			switch (qdf_nbuf_get_exemption_type(msdu)) {
			case QDF_NBUF_EXEMPT_NO_EXEMPTION:
			case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
				/* We want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 1;
				break;
			case QDF_NBUF_EXEMPT_ALWAYS:
				/* We don't want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 0;
				break;
			default:
				msdu_info.htt.action.do_encrypt = 1;
				qdf_assert(0);
				break;
			}

			tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
						pkt_download_len, ep_id,
						&msdu_info);

			if (qdf_likely(tx_desc)) {
				DPTRACE(qdf_dp_trace_ptr(msdu,
				    QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
				    qdf_nbuf_data_addr(msdu),
				    sizeof(qdf_nbuf_data(msdu)),
				    tx_desc->id, vdev->vdev_id));
				/*
				 * If debug display is enabled, show the meta
				 * data being downloaded to the target via the
				 * HTT tx descriptor.
				 */
				if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER
							(msdu))
					pkt_download_len +=
					  sizeof(struct htt_tx_msdu_desc_ext_t);

				htt_tx_desc_display(tx_desc->htt_tx_desc);
				if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
						ep_id, pkt_download_len))) {
					/*
					 * The packet could not be sent.
					 * Free the descriptor, return the
					 * packet to the caller.
					 */
					ol_tx_desc_free(pdev, tx_desc);
					return msdu;
				}
				if (msdu_info.tso_info.curr_seg) {
					msdu_info.tso_info.curr_seg =
						msdu_info.tso_info.curr_seg->
						next;
				}

				if (msdu_info.tso_info.is_tso) {
					qdf_nbuf_reset_num_frags(msdu);
					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
				}
			} else {
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				/* the list of unaccepted MSDUs */
				return msdu;
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
		msdu_info.htt.info.vdev_id = vdev->vdev_id;
		msdu_info.htt.action.cksum_offload =
			qdf_nbuf_get_tx_cksum(msdu);
		switch (qdf_nbuf_get_exemption_type(msdu)) {
		case QDF_NBUF_EXEMPT_NO_EXEMPTION:
		case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
			/* We want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 1;
			break;
		case QDF_NBUF_EXEMPT_ALWAYS:
			/* We don't want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 0;
			break;
		default:
			msdu_info.htt.action.do_encrypt = 1;
			qdf_assert(0);
			break;
		}

		tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
					pkt_download_len, ep_id,
					&msdu_info);

		if (qdf_likely(tx_desc)) {
			DPTRACE(qdf_dp_trace_ptr(msdu,
				QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
				qdf_nbuf_data_addr(msdu),
				sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
				vdev->vdev_id));
			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
				pkt_download_len +=
					sizeof(struct htt_tx_msdu_desc_ext_t);

			htt_tx_desc_display(tx_desc->htt_tx_desc);
			/*
			 * The netbuf may get linked into a different list
			 * inside the ce_send_fast function, so store the next
			 * pointer before the ce_send call.
			 */
			next = qdf_nbuf_next(msdu);
			if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
					       ep_id, pkt_download_len))) {
				/*
				 * The packet could not be sent.
				 * Free the descriptor, return the packet to
				 * the caller.
				 */
				ol_tx_desc_free(pdev, tx_desc);
				return msdu;
			}
			msdu = next;
		} else {
			TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}
	}

	return NULL; /* all MSDUs were accepted */
}
#endif /* FEATURE_TSO */
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_ll_wrapper() - dispatch a tx msdu list to either the fastpath
 * (ol_tx_ll_fast) or the regular LL path (ol_tx_ll)
 */
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct hif_opaque_softc *hif_device =
		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_likely(hif_device && hif_is_fastpath_mode_enabled(hif_device)))
		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
	else
		msdu_list = ol_tx_ll(vdev, msdu_list);

	return msdu_list;
}
#else
qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	return ol_tx_ll(vdev, msdu_list);
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL

#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
{
	int max_to_accept;

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason) {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		return;
	}

	/*
	 * Send as much of the backlog as possible, but leave some margin
	 * of unallocated tx descriptors that can be used for new frames
	 * being transmitted by other vdevs.
	 * Ideally there would be a scheduler, which would not only leave
	 * some margin for new frames for other vdevs, but also would
	 * fairly apportion the tx descriptors between multiple vdevs that
	 * have backlogs in their pause queues.
	 * However, the fairness benefit of having a scheduler for frames
	 * from multiple vdev's pause queues is not sufficient to outweigh
	 * the extra complexity.
	 */
	max_to_accept = vdev->pdev->tx_desc.num_free -
		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
		qdf_nbuf_t tx_msdu;
		max_to_accept--;
		vdev->ll_pause.txq.depth--;
		tx_msdu = vdev->ll_pause.txq.head;
		if (tx_msdu) {
			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
			if (NULL == vdev->ll_pause.txq.head)
				vdev->ll_pause.txq.tail = NULL;
			qdf_nbuf_set_next(tx_msdu, NULL);
			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
					QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
			/*
			 * It is unexpected that ol_tx_ll would reject the frame
			 * since we checked that there's room for it, though
			 * there's an infinitesimal possibility that between the
			 * time we checked the room available and now, a
			 * concurrent batch of tx frames used up all the room.
			 * For simplicity, just drop the frame.
			 */
			if (tx_msdu) {
				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
					       QDF_DMA_TO_DEVICE);
				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
			}
		}
	}
	if (vdev->ll_pause.txq.depth) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
			vdev->ll_pause.q_overflow_cnt++;
	}

	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

static qdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
			      qdf_nbuf_t msdu_list, uint8_t start_timer)
{
	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	while (msdu_list &&
	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
					     QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
		DPTRACE(qdf_dp_trace(msdu_list,
				QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
				qdf_nbuf_data_addr(msdu_list),
				sizeof(qdf_nbuf_data(msdu_list)), QDF_TX));

		vdev->ll_pause.txq.depth++;
		if (!vdev->ll_pause.txq.head) {
			vdev->ll_pause.txq.head = msdu_list;
			vdev->ll_pause.txq.tail = msdu_list;
		} else {
			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
		}
		vdev->ll_pause.txq.tail = msdu_list;

		msdu_list = next;
	}
	if (vdev->ll_pause.txq.tail)
		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

	if (start_timer) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
	}
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return msdu_list;
}

/*
 * Store up the tx frame in the vdev's tx queue if the vdev is paused.
 * If there are too many frames in the tx queue, reject it.
 */
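/*
 * Decision flow implemented below:
 * - vdev paused: when the sole pause reason is an unauthorized peer,
 *   EAPOL/WAPI frames bypass the pause and are sent immediately; all other
 *   frames are appended to the pause queue and the flush timer is started.
 * - vdev not paused, but a backlog or tx throttling exists: append the new
 *   frames to the pause queue, then drain it if throttling is disabled
 *   (THROTTLE_LEVEL_0) or currently in its "on" phase.
 * - otherwise: hand the new frames straight to ol_tx_ll_wrapper().
 */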
qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	uint16_t eth_type;
	uint32_t paused_reason;

	if (msdu_list == NULL)
		return NULL;

	paused_reason = vdev->ll_pause.paused_reason;
	if (paused_reason) {
		if (qdf_unlikely((paused_reason &
				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
				 paused_reason)) {
			eth_type = (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->
				    ethertype[0] << 8) |
				   (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->ethertype[1]);
			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
				return msdu_list;
			}
		}
		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
	} else {
		if (vdev->ll_pause.txq.depth > 0 ||
		    vdev->pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			/* not paused, but there is a backlog of frames
			   from a prior pause or throttle off phase */
			msdu_list = ol_tx_vdev_pause_queue_append(
				vdev, msdu_list, 0);
			/* if throttle is disabled or phase is "on",
			   send the frame */
			if (vdev->pdev->tx_throttle.current_throttle_level ==
			    THROTTLE_LEVEL_0 ||
			    vdev->pdev->tx_throttle.current_throttle_phase ==
			    THROTTLE_PHASE_ON) {
				/* send as many frames as possible
				   from the vdev's backlog */
				ol_tx_vdev_ll_pause_queue_send_base(vdev);
			}
		} else {
			/* not paused, no throttle and no backlog -
			   send the new frames */
			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
		}
	}
	return msdu_list;
}

/*
 * Run through the transmit queues for all the vdevs and
 * send the pending frames
 */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
	int max_to_send;        /* tracks how many frames can still be sent */
	qdf_nbuf_t tx_msdu;
	struct ol_txrx_vdev_t *vdev = NULL;
	uint8_t more;

	if (NULL == pdev)
		return;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;

	/* ensure that we send no more than tx_threshold frames at once */
	max_to_send = pdev->tx_throttle.tx_threshold;

	/* round robin through the vdev queues for the given pdev */

	/* Potential improvement: download several frames from the same vdev
	   at a time, since it is more likely that those frames could be
	   aggregated together; remember which vdev was serviced last,
	   so the next call to this function can resume the round-robin
	   traversing where the current invocation left off */
	do {
		more = 0;
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
			if (vdev->ll_pause.txq.depth) {
				if (vdev->ll_pause.paused_reason) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				tx_msdu = vdev->ll_pause.txq.head;
				if (NULL == tx_msdu) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				max_to_send--;
				vdev->ll_pause.txq.depth--;

				vdev->ll_pause.txq.head =
					qdf_nbuf_next(tx_msdu);

				if (NULL == vdev->ll_pause.txq.head)
					vdev->ll_pause.txq.tail = NULL;

				qdf_nbuf_set_next(tx_msdu, NULL);
				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
				/*
				 * It is unexpected that ol_tx_ll would reject
				 * the frame, since we checked that there's
				 * room for it, though there's an infinitesimal
				 * possibility that between the time we checked
				 * the room available and now, a concurrent
				 * batch of tx frames used up all the room.
				 * For simplicity, just drop the frame.
				 */
				if (tx_msdu) {
					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
						       QDF_DMA_TO_DEVICE);
					qdf_nbuf_tx_free(tx_msdu,
							 QDF_NBUF_PKT_ERROR);
				}
			}
			/* check if there are more msdus to transmit */
			if (vdev->ll_pause.txq.depth)
				more = 1;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} while (more && max_to_send);

	vdev = NULL;
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
		if (vdev->ll_pause.txq.depth) {
			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
			qdf_timer_start(
				&pdev->tx_throttle.tx_timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			return;
		}
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
}

void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;
	ol_tx_vdev_ll_pause_queue_send_base(vdev);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
{
	return
		tx_spec &
		(OL_TX_SPEC_RAW | OL_TX_SPEC_NO_AGGR | OL_TX_SPEC_NO_ENCRYPT);
}

static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
{
	uint8_t sub_type = 0x1; /* 802.11 MAC header present */

	if (tx_spec & OL_TX_SPEC_NO_AGGR)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
	if (tx_spec & OL_TX_SPEC_NO_ENCRYPT)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	return sub_type;
}

qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
		 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);

		if (tx_spec != OL_TX_SPEC_STD) {
			if (tx_spec & OL_TX_SPEC_NO_FREE) {
				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
			} else if (tx_spec & OL_TX_SPEC_TSO) {
				tx_desc->pkt_type = OL_TX_FRM_TSO;
			} else if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev,
						 tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev,
						 tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
	do { \
		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
			if (tx_msdu_info.peer) { \
				/* remove the peer reference added above */ \
				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
			} \
			goto MSDU_LOOP_BOTTOM; \
		} \
	} while (0)
#else
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
#endif

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */

/**
 * parse_ocb_tx_header() - Function to check for OCB
 * TX control header on a packet and extract it if present
 *
 * @msdu: Pointer to OS packet (qdf_nbuf_t)
 */
#define OCB_HEADER_VERSION     1
bool parse_ocb_tx_header(qdf_nbuf_t msdu,
			 struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	struct ether_header *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if TX control header is present */
	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do.. */
		return true;

	/* Remove the ethernet header */
	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		return false;
	}

	/* Remove the TX control header */
	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}

/**
 * ol_tx_non_std - Allow the control-path SW to send data frames
 *
 * @data_vdev - which vdev should transmit the tx data frames
 * @tx_spec - what non-standard handling to apply to the tx data frames
 * @msdu_list - NULL-terminated list of tx MSDUs
 *
 * Generally, all tx data frames come from the OS shim into the txrx layer.
 * However, there are rare cases such as TDLS messaging where the UMAC
 * control-path SW creates tx data frames.
 * This UMAC SW can call this function to provide the tx data frames to
 * the txrx layer.
 * The UMAC SW can request a callback for these data frames after their
 * transmission completes, by using the ol_txrx_data_tx_cb_set function
 * to register a tx completion callback, and by specifying
 * ol_tx_spec_no_free as the tx_spec arg when giving the frames to
 * ol_tx_non_std.
 * The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
 * as specified by ol_cfg_frame_type().
 *
 * Return: null - success, skb - failure
 */
qdf_nbuf_t
ol_tx_non_std(ol_txrx_vdev_handle vdev,
	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
}

void
ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
		       ol_txrx_data_tx_cb callback, void *ctxt)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	pdev->tx_data_callback.func = callback;
	pdev->tx_data_callback.ctxt = ctxt;
}

/**
 * ol_txrx_mgmt_tx_cb_set() - Store a callback for delivery
 * notifications for management frames.
 *
 * @pdev - the data physical device object
 * @type - the type of mgmt frame the callback is used for
 * @download_cb - the callback for notification of delivery to the target
 * @ota_ack_cb - the callback for notification of delivery to the peer
 * @ctxt - context to use with the callback
 *
 * When the txrx SW receives notifications from the target that a tx frame
 * has been delivered to its recipient, it will check if the tx frame
 * is a management frame. If so, the txrx SW will check the management
 * frame type specified when the frame was submitted for transmission.
 * If there is a callback function registered for the type of management
 * frame in question, the txrx code will invoke the callback to inform
 * the management + control SW that the mgmt frame was delivered.
 * This function is used by the control SW to store a callback pointer
 * for a given type of management frame.
 */
void
ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
		       uint8_t type,
		       ol_txrx_mgmt_tx_cb download_cb,
		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
	pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
	pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
}
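
/*
 * Illustrative registration sketch (names other than
 * ol_txrx_mgmt_tx_cb_set() are hypothetical): the control SW typically
 * registers its per-type callbacks once at attach time, e.g.
 *
 *	ol_txrx_mgmt_tx_cb_set(pdev, my_mgmt_type_index,
 *			       my_download_complete_cb, my_ota_ack_cb,
 *			       my_ctxt);
 *
 * Passing NULL for ota_ack_cb means no over-the-air delivery notification
 * is requested; ol_txrx_mgmt_send_ext() then leaves do_tx_complete clear
 * for frames of that type.
 */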

#if defined(HELIUMPLUS_PADDR64)
void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
{
	uint32_t *frag_ptr_i_p;
	int i;

	qdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
		  tx_desc, tx_desc->id);
	qdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%llx",
		  tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
	qdf_print("%s %d: Fragment Descriptor 0x%p (paddr=0x%llx)",
		  __func__, __LINE__, tx_desc->htt_frag_desc,
		  tx_desc->htt_frag_desc_paddr);

	/* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
	   is already de-referenceable (=> in virtual address space) */
	frag_ptr_i_p = tx_desc->htt_frag_desc;

	/* Dump 6 words of TSO flags */
	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags:  ",
		       DUMP_PREFIX_NONE, 8, 4,
		       frag_ptr_i_p, 24, true);

	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */

	i = 0;
	while (*frag_ptr_i_p) {
		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr:  ",
			       DUMP_PREFIX_NONE, 8, 4,
			       frag_ptr_i_p, 8, true);
		i++;
		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
			break;
		else /* jump to next pointer - skip length */
			frag_ptr_i_p += 2;
	}
	return;
}
#endif /* HELIUMPLUS_PADDR64 */

/**
 * ol_txrx_mgmt_send_ext() - Transmit a management frame
 *
 * @vdev - virtual device transmitting the frame
 * @tx_mgmt_frm - management frame to transmit
 * @type - the type of management frame (determines what callback to use)
 * @use_6mbps - specify whether management frame to transmit should
 * use 6 Mbps rather than 1 Mbps min rate(for 5GHz band or P2P)
 * @chanfreq - channel to transmit the frame on
 *
 * Send the specified management frame from the specified virtual device.
 * The type is used for determining whether to invoke a callback to inform
 * the sender that the tx mgmt frame was delivered, and if so, which
 * callback to use.
 *
 * Return: 0 - the frame is accepted for transmission
 *         1 - the frame was not accepted
 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001292int
Dhanashri Atre12a08392016-02-17 13:10:34 -08001293ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301294 qdf_nbuf_t tx_mgmt_frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001295 uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
1296{
1297 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1298 struct ol_tx_desc_t *tx_desc;
1299 struct ol_txrx_msdu_info_t tx_msdu_info;
1300
1301 tx_msdu_info.tso_info.is_tso = 0;
1302
1303 tx_msdu_info.htt.action.use_6mbps = use_6mbps;
1304 tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
1305 tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
1306 tx_msdu_info.htt.action.do_tx_complete =
1307 pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;
1308
1309 /*
1310 * FIX THIS: l2_hdr_type should only specify L2 header type
1311 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
1312 * that is a combination of L2 header type and 802.11 frame type.
1313 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
1314 * But if the 802.11 frame type is "data", then the HTT pkt type is
1315 * the L2 header type (more or less): 802.3 vs. Native WiFi
1316 * (basic 802.11).
1317 * (Or the header type can be "raw", which is any version of the 802.11
1318 * header, and also implies that some of the offloaded tx data
1319 * processing steps may not apply.)
1320 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
1321 * l2_hdr_type field to program the HTT pkt type. Thus, this txrx SW
1322 * needs to overload the l2_hdr_type to indicate whether the frame is
1323 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
1324 * To fix this, the msdu_info's l2_hdr_type should be left specifying
1325 * just the L2 header type. For mgmt frames, there should be a
1326 * separate function to patch the HTT pkt type to store a "mgmt" value
1327 * rather than the L2 header type. Then the HTT pkt type can be
1328 * programmed efficiently for data frames, and the msdu_info's
1329 * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
1330 * frame type rather than the L2 header type.
1331 */
1332 /*
1333 * FIX THIS: remove duplication of htt_frm_type_mgmt and
1334 * htt_pkt_type_mgmt
 1335	 * The htt module expects an "enum htt_pkt_type" value.
 1336	 * The htt_dxe module expects an "enum htt_frm_type" value.
1337 * This needs to be cleaned up, so both versions of htt use a
1338 * consistent method of specifying the frame type.
1339 */
1340#ifdef QCA_SUPPORT_INTEGRATED_SOC
 1341	/* tx mgmt frames always come with an 802.11 header */
1342 tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
1343 tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
1344#else
1345 tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
1346 tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
1347#endif
1348
1349 tx_msdu_info.peer = NULL;
1350
Nirav Shah2e583a02016-04-30 14:06:12 +05301351
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001352	/* For LL, tx_comp_req is not used, so it is initialized to 0 */
1353 tx_msdu_info.htt.action.tx_comp_req = 0;
1354 tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
1355 /* FIX THIS -
1356 * The FW currently has trouble using the host's fragments table
1357 * for management frames. Until this is fixed, rather than
1358 * specifying the fragment table to the FW, specify just the
1359 * address of the initial fragment.
1360 */
1361#if defined(HELIUMPLUS_PADDR64)
1362 /* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
1363 tx_desc); */
1364#endif /* defined(HELIUMPLUS_PADDR64) */
1365 if (tx_desc) {
1366 /*
1367 * Following the call to ol_tx_desc_ll, frag 0 is the
1368 * HTT tx HW descriptor, and the frame payload is in
1369 * frag 1.
1370 */
1371 htt_tx_desc_frags_table_set(
1372 pdev->htt_pdev,
1373 tx_desc->htt_tx_desc,
Nirav Shahcbc6d722016-03-01 16:24:53 +05301374 qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001375 0, 0);
1376#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
1377 dump_frag_desc(
1378 "after htt_tx_desc_frags_table_set",
1379 tx_desc);
 1380#endif /* defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG) */
1381 }
Nirav Shah2e583a02016-04-30 14:06:12 +05301382 if (!tx_desc)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001383 return -EINVAL; /* can't accept the tx mgmt frame */
Nirav Shah2e583a02016-04-30 14:06:12 +05301384
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001385 TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
1386 TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
1387 tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
1388
1389 htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301390 QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
1391 QDF_NBUF_TX_PKT_MGMT_TRACK;
1392 ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001393 htt_pkt_type_mgmt);
1394
1395 return 0; /* accepted the tx mgmt frame */
1396}
1397
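/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * handing a management frame to ol_txrx_mgmt_send_ext(). The type index
 * and channel frequency are assumptions for illustration; only the
 * return-value convention (0 = accepted, -EINVAL = rejected, in which
 * case the caller still owns the nbuf) comes from the function above.
 */
#if 0	/* example only - never compiled */
static int example_send_mgmt_frame(ol_txrx_vdev_handle vdev,
				   qdf_nbuf_t mgmt_frm)
{
	uint8_t example_type = 0;	/* hypothetical mgmt frame type index */
	uint16_t chanfreq = 5180;	/* e.g. 5 GHz channel 36 */

	/* use_6mbps = 1: send at 6 Mbps (5 GHz band or P2P) */
	if (ol_txrx_mgmt_send_ext(vdev, mgmt_frm, example_type,
				  1, chanfreq)) {
		/* not accepted; the frame was not consumed */
		return -EINVAL;
	}
	return 0;	/* accepted; completion reported via the callbacks */
}
#endif
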
1398void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
1399{
1400 htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
1401}
1402
Nirav Shahcbc6d722016-03-01 16:24:53 +05301403qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
1404 qdf_nbuf_t msdu, uint16_t peer_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001405{
1406 struct ol_tx_desc_t *tx_desc;
1407 struct ol_txrx_msdu_info_t msdu_info;
1408
1409 msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
1410 msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
1411 msdu_info.peer = NULL;
1412 msdu_info.htt.action.tx_comp_req = 0;
1413 msdu_info.tso_info.is_tso = 0;
1414
1415 ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
1416 HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
1417
1418 htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);
1419
Nirav Shah0d58a7e2016-04-26 22:54:12 +05301420 ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001421
1422 return NULL;
1423}
1424
1425#if defined(FEATURE_TSO)
1426void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
1427{
1428 int i;
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301429 struct qdf_tso_seg_elem_t *c_element;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001430
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301431	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
	/* if even the first segment cannot be allocated, leave an empty pool */
	if (qdf_unlikely(!c_element)) {
		pdev->tso_seg_pool.freelist = NULL;
		pdev->tso_seg_pool.pool_size = 0;
		qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
		return;
	}
Leo Chang376398b2015-10-23 14:19:02 -07001432	pdev->tso_seg_pool.freelist = c_element;
 1433	for (i = 0; i < (num_seg - 1); i++) {
 1434		c_element->next =
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301435			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		/* stop extending the freelist if an allocation fails */
		if (qdf_unlikely(!c_element->next))
			break;
Leo Chang376398b2015-10-23 14:19:02 -07001436		c_element = c_element->next;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001438	}
	/* terminate the freelist at the last successfully allocated element */
	c_element->next = NULL;
Leo Chang376398b2015-10-23 14:19:02 -07001439	pdev->tso_seg_pool.pool_size = num_seg;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301440 qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001441}
1442
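/*
 * Illustrative sketch (not part of the driver): how a TSO segment element
 * might be taken from and returned to the freelist built by
 * ol_tso_seg_list_init() above, under the pool's tso_mutex. The helper
 * names are hypothetical, and the use of num_free assumes the pool
 * tracks its free count this way.
 */
#if 0	/* example only - never compiled */
static struct qdf_tso_seg_elem_t *
example_tso_seg_alloc(struct ol_txrx_pdev_t *pdev)
{
	struct qdf_tso_seg_elem_t *seg;

	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	seg = pdev->tso_seg_pool.freelist;
	if (seg) {
		pdev->tso_seg_pool.freelist = seg->next;
		pdev->tso_seg_pool.num_free--;
	}
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
	return seg;
}

static void example_tso_seg_free(struct ol_txrx_pdev_t *pdev,
				 struct qdf_tso_seg_elem_t *seg)
{
	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	seg->next = pdev->tso_seg_pool.freelist;
	pdev->tso_seg_pool.freelist = seg;
	pdev->tso_seg_pool.num_free++;
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
}
#endif
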
1443void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
1444{
Leo Chang376398b2015-10-23 14:19:02 -07001445 int i;
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301446 struct qdf_tso_seg_elem_t *c_element;
1447 struct qdf_tso_seg_elem_t *temp;
Leo Chang376398b2015-10-23 14:19:02 -07001448
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301449 qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
Leo Chang376398b2015-10-23 14:19:02 -07001450 c_element = pdev->tso_seg_pool.freelist;
1451 for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
1452 temp = c_element->next;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301453 qdf_mem_free(c_element);
Leo Chang376398b2015-10-23 14:19:02 -07001454 c_element = temp;
1455 if (!c_element)
1456 break;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001457 }
1458
1459 pdev->tso_seg_pool.freelist = NULL;
1460 pdev->tso_seg_pool.num_free = 0;
1461 pdev->tso_seg_pool.pool_size = 0;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05301462 qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1463 qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001464}
1465#endif /* FEATURE_TSO */