/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
#include <ol_tx_desc.h>         /* ol_tx_desc */
#include <ol_tx_send.h>         /* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>        /* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
#include <ol_tx.h>

#ifdef WLAN_FEATURE_FASTPATH
#include <hif.h>                /* HIF_DEVICE */
#include <htc_api.h>            /* Layering violation, but required for fast path */
#include <htt_internal.h>
#include <htt_types.h>          /* htc_endpoint */

int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id);
#endif /* WLAN_FEATURE_FASTPATH */

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool. Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
	do {								\
		struct ol_txrx_pdev_t *pdev = vdev->pdev;		\
		(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;	\
		tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);	\
		if (qdf_unlikely(!tx_desc)) {				\
			TXRX_STATS_MSDU_LIST_INCR(			\
				pdev, tx.dropped.host_reject, msdu);	\
			return msdu; /* the list of unaccepted MSDUs */	\
		}							\
	} while (0)

#if defined(FEATURE_TSO)
/**
 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
 * related information in the msdu_info meta data
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: 0 - success, >0 - error
 */
static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
	qdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
{
	msdu_info->tso_info.curr_seg = NULL;
	if (qdf_nbuf_is_tso(msdu)) {
		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
		msdu_info->tso_info.tso_seg_list = NULL;
		msdu_info->tso_info.num_segs = num_seg;
		while (num_seg) {
			struct qdf_tso_seg_elem_t *tso_seg =
				ol_tso_alloc_segment(vdev->pdev);
			if (tso_seg) {
				tso_seg->next =
					msdu_info->tso_info.tso_seg_list;
				msdu_info->tso_info.tso_seg_list
					= tso_seg;
				num_seg--;
			} else {
				struct qdf_tso_seg_elem_t *next_seg;
				struct qdf_tso_seg_elem_t *free_seg =
					msdu_info->tso_info.tso_seg_list;
				qdf_print("TSO seg alloc failed!\n");
				while (free_seg) {
					next_seg = free_seg->next;
					ol_tso_free_segment(vdev->pdev,
							    free_seg);
					free_seg = next_seg;
				}
				return 1;
			}
		}
		qdf_nbuf_get_tso_info(vdev->pdev->osdev,
				      msdu, &(msdu_info->tso_info));
		msdu_info->tso_info.curr_seg =
			msdu_info->tso_info.tso_seg_list;
		num_seg = msdu_info->tso_info.num_segs;
	} else {
		msdu_info->tso_info.is_tso = 0;
		msdu_info->tso_info.num_segs = 1;
	}
	return 0;
}
#endif

/**
 * ol_tx_send_data_frame() - send data frame
 * @sta_id: sta id
 * @skb: skb
 * @proto_type: proto type
 *
 * Return: NULL on success, skb on failure
 */
qdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, qdf_nbuf_t skb,
				 uint8_t proto_type)
{
	void *qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_txrx_peer_t *peer;
	qdf_nbuf_t ret;
	QDF_STATUS status;

	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s:pdev is null", __func__);
		return skb;
	}
	if (qdf_unlikely(!qdf_ctx)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s:qdf_ctx is null", __func__);
		return skb;
	}

	if (sta_id >= WLAN_MAX_STA_COUNT) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s:Invalid sta id", __func__);
		return skb;
	}

	peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s:Invalid peer", __func__);
		return skb;
	}

	if (peer->state < ol_txrx_peer_state_conn) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s: station not yet registered; dropping pkt", __func__);
		return skb;
	}

	status = qdf_nbuf_map_single(qdf_ctx, skb, QDF_DMA_TO_DEVICE);
	if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s: nbuf map failed", __func__);
		return skb;
	}

	qdf_nbuf_trace_set_proto_type(skb, proto_type);

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
	    && (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
	    && (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL(peer->vdev, skb);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
			  "%s: Failed to tx", __func__);
		qdf_nbuf_unmap_single(qdf_ctx, ret, QDF_DMA_TO_DEVICE);
		return ret;
	}

	return NULL;
}

#ifdef IPA_OFFLOAD
/**
 * ol_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: vdev
 * @skb: skb
 *
 * Return: NULL on success, skb on failure
 */
qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
				     qdf_nbuf_t skb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	qdf_nbuf_t ret;

	if (qdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
	    && (qdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
	    && (qdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		qdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	qdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
	if (ret) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
#endif


#if defined(FEATURE_TSO)
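/**
 * ol_tx_ll() - transmit a list of tx frames for an LL system (TSO variant)
 * @vdev: virtual device handle of the transmitting vdev
 * @msdu_list: null-terminated list of tx frames (qdf_nbuf_t)
 *
 * For each MSDU, prepare the TSO segment list (if the frame is TSO),
 * allocate a tx descriptor per segment, and hand each segment to
 * ol_tx_send() for download to the target.
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */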
qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
						  tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) =
					msdu_info.tso_info.curr_seg->
					seg.tso_frags[0].paddr_low_32;

			segments--;

			/*
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the
			 * msdu. This will ensure that the skb is freed only
			 * after receiving tx completion for all segments of
			 * an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

			/*
			 * If debug display is enabled, show the meta-data
			 * being downloaded to the target via the HTT tx
			 * descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);

			ol_tx_send(vdev->pdev, tx_desc, msdu);

			if (msdu_info.tso_info.curr_seg) {
				msdu_info.tso_info.curr_seg =
					msdu_info.tso_info.curr_seg->next;
			}

			qdf_nbuf_reset_num_frags(msdu);

			if (msdu_info.tso_info.is_tso) {
				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else /* TSO */

qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);
		ol_tx_send(vdev->pdev, tx_desc, msdu);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}
#endif /* TSO */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_prepare_ll_fast() - Alloc and prepare Tx descriptor
 *
 * Allocate and prepare a Tx descriptor with msdu and fragment descriptor
 * information.
 *
 * @pdev: pointer to ol pdev handle
 * @vdev: pointer to ol vdev handle
 * @msdu: linked list of msdu packets
 * @pkt_download_len: packet download length
 * @ep_id: endpoint ID
 * @msdu_info: Handle to msdu_info
 *
 * Return: Pointer to Tx descriptor
 */
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
		      ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
		      uint32_t pkt_download_len, uint32_t ep_id,
		      struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	uint32_t *htt_tx_desc;
	void *htc_hdr_vaddr;
	u_int32_t num_frags, i;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (qdf_unlikely(!tx_desc))
		return NULL;

	tx_desc->netbuf = msdu;
	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = ol_tx_frm_tso;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
	} else {
		tx_desc->pkt_type = ol_tx_frm_std;
	}

	htt_tx_desc = tx_desc->htt_tx_desc;

	/* Make sure frags num is set to 0 */
	/*
	 * Do this here rather than in hardstart, so
	 * that we can hopefully take only one cache-miss while
	 * accessing skb->cb.
	 */

	/* HTT Header */
	/* TODO : Take care of multiple fragments */

	/* TODO: Precompute and store paddr in ol_tx_desc_t */
	/* Virtual address of the HTT/HTC header, added by driver */
	htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
	htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
			 &msdu_info->htt, &msdu_info->tso_info,
			 NULL, vdev->opmode == wlan_op_mode_ocb);

	num_frags = qdf_nbuf_get_num_frags(msdu);
	/* num_frags are expected to be 2 max */
	num_frags = (num_frags > QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)
		? QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			 msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			qdf_size_t frag_len;
			qdf_dma_addr_t frag_paddr;

			frag_len = qdf_nbuf_get_frag_len(msdu, i);
			frag_paddr = qdf_nbuf_get_frag_paddr(msdu, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
					 i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			qdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  i - 1, frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
					 i - 1, frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

	/*
	 * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
	 * this is not required. We still have to mark the swap bit correctly,
	 * when posting to the ring
	 */
	/* Check to make sure, data download length is correct */

	/*
	 * TODO : Can we remove this check and always download a fixed length ?
	 */
	if (qdf_unlikely(qdf_nbuf_len(msdu) < pkt_download_len))
		pkt_download_len = qdf_nbuf_len(msdu);

	/* Fill the HTC header information */
	/*
	 * Passing 0 as the seq_no field, we can probably get away
	 * with it for the time being, since this is not checked in f/w
	 */
	/* TODO : Prefill this, look at multi-fragment case */
	HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);

	return tx_desc;
}
#if defined(FEATURE_TSO)
/**
 * ol_tx_ll_fast() - Update metadata information and send msdu to HIF/CE
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Return: NULL on success, pointer to the nbuf that failed to send otherwise.
 */
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
			qdf_print("ol_tx_prepare_tso failed\n");
			TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
						  tx.dropped.host_reject, msdu);
			return msdu;
		}

		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list
		 * inside the ce_send_fast function, so store the next
		 * pointer before the ce_send call.
		 */
		next = qdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				QDF_NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
					curr_seg->seg.tso_frags[0].paddr_low_32;

			segments--;

			/*
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the
			 * msdu. This will ensure that the skb is freed only
			 * after receiving tx completion for all segments of
			 * an nbuf
			 */
			if (segments)
				qdf_nbuf_inc_users(msdu);

			msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
			msdu_info.htt.info.vdev_id = vdev->vdev_id;
			msdu_info.htt.action.cksum_offload =
				qdf_nbuf_get_tx_cksum(msdu);
			switch (qdf_nbuf_get_exemption_type(msdu)) {
			case QDF_NBUF_EXEMPT_NO_EXEMPTION:
			case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
				/* We want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 1;
				break;
			case QDF_NBUF_EXEMPT_ALWAYS:
				/* We don't want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 0;
				break;
			default:
				msdu_info.htt.action.do_encrypt = 1;
				qdf_assert(0);
				break;
			}

			tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
							pkt_download_len,
							ep_id, &msdu_info);

			if (qdf_likely(tx_desc)) {
				/*
				 * If debug display is enabled, show the meta
				 * data being downloaded to the target via the
				 * HTT tx descriptor.
				 */
				htt_tx_desc_display(tx_desc->htt_tx_desc);
				if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu,
						       1, ep_id))) {
					/*
					 * The packet could not be sent.
					 * Free the descriptor, return the
					 * packet to the caller.
					 */
					ol_tx_desc_free(pdev, tx_desc);
					return msdu;
				}
				if (msdu_info.tso_info.curr_seg) {
					msdu_info.tso_info.curr_seg =
					msdu_info.tso_info.curr_seg->next;
				}

				if (msdu_info.tso_info.is_tso) {
					qdf_nbuf_reset_num_frags(msdu);
					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
				}
			} else {
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				/* the list of unaccepted MSDUs */
				return msdu;
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else
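/**
 * ol_tx_ll_fast() - Update metadata information and send msdu to HIF/CE
 * (non-TSO variant)
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Fill in the per-MSDU HTT metadata, build a tx descriptor via
 * ol_tx_prepare_ll_fast() and push the frame directly to the copy
 * engine with ce_send_fast().
 *
 * Return: NULL on success, pointer to the nbuf that failed to send otherwise.
 */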
qdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
		msdu_info.htt.info.vdev_id = vdev->vdev_id;
		msdu_info.htt.action.cksum_offload =
			qdf_nbuf_get_tx_cksum(msdu);
		switch (qdf_nbuf_get_exemption_type(msdu)) {
		case QDF_NBUF_EXEMPT_NO_EXEMPTION:
		case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
			/* We want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 1;
			break;
		case QDF_NBUF_EXEMPT_ALWAYS:
			/* We don't want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 0;
			break;
		default:
			msdu_info.htt.action.do_encrypt = 1;
			qdf_assert(0);
			break;
		}

		tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
						pkt_download_len, ep_id,
						&msdu_info);

		if (qdf_likely(tx_desc)) {
			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);
			/*
			 * The netbuf may get linked into a different list
			 * inside the ce_send_fast function, so store the next
			 * pointer before the ce_send call.
			 */
			next = qdf_nbuf_next(msdu);
			if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu, 1,
					       ep_id))) {
				/*
				 * The packet could not be sent.
				 * Free the descriptor, return the packet to
				 * the caller.
				 */
				ol_tx_desc_free(pdev, tx_desc);
				return msdu;
			}
			msdu = next;
		} else {
			TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}
	}

	return NULL; /* all MSDUs were accepted */
}
#endif /* FEATURE_TSO */
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_ll_wrapper() - wrapper to ol_tx_ll
 * @vdev: the virtual device sending the data
 * @msdu_list: list of tx frames to send
 *
 * Use ol_tx_ll_fast() when the HIF layer reports that fastpath mode is
 * enabled, otherwise fall back to ol_tx_ll().
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */
static inline qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct hif_opaque_softc *hif_device =
		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);

	if (qdf_likely(hif_device && hif_is_fastpath_mode_enabled(hif_device)))
		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
	else
		msdu_list = ol_tx_ll(vdev, msdu_list);

	return msdu_list;
}
#else
static inline qdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	return ol_tx_ll(vdev, msdu_list);
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL

#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
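
/**
 * ol_tx_vdev_ll_pause_queue_send_base() - drain the vdev's pause queue
 * @vdev: the virtual device whose queued tx frames are to be sent
 *
 * Do nothing if the vdev is currently paused. Otherwise, send as much of
 * the backlog as the free tx descriptors allow (keeping
 * OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN descriptors in reserve), and restart
 * the pause queue timer if frames remain queued.
 */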
static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
{
	int max_to_accept;

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason) {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		return;
	}

	/*
	 * Send as much of the backlog as possible, but leave some margin
	 * of unallocated tx descriptors that can be used for new frames
	 * being transmitted by other vdevs.
	 * Ideally there would be a scheduler, which would not only leave
	 * some margin for new frames for other vdevs, but also would
	 * fairly apportion the tx descriptors between multiple vdevs that
	 * have backlogs in their pause queues.
	 * However, the fairness benefit of having a scheduler for frames
	 * from multiple vdev's pause queues is not sufficient to outweigh
	 * the extra complexity.
	 */
	max_to_accept = vdev->pdev->tx_desc.num_free -
		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
		qdf_nbuf_t tx_msdu;
		max_to_accept--;
		vdev->ll_pause.txq.depth--;
		tx_msdu = vdev->ll_pause.txq.head;
		if (tx_msdu) {
			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
			if (NULL == vdev->ll_pause.txq.head)
				vdev->ll_pause.txq.tail = NULL;
			qdf_nbuf_set_next(tx_msdu, NULL);
			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
						QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
			/*
			 * It is unexpected that ol_tx_ll would reject the frame
			 * since we checked that there's room for it, though
			 * there's an infinitesimal possibility that between the
			 * time we checked the room available and now, a
			 * concurrent batch of tx frames used up all the room.
			 * For simplicity, just drop the frame.
			 */
			if (tx_msdu) {
				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
					       QDF_DMA_TO_DEVICE);
				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
			}
		}
	}
	if (vdev->ll_pause.txq.depth) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
			vdev->ll_pause.q_overflow_cnt++;
	}

	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

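/**
 * ol_tx_vdev_pause_queue_append() - append tx frames to the vdev's pause queue
 * @vdev: the virtual device holding the pause queue
 * @msdu_list: list of tx frames to enqueue
 * @start_timer: whether to (re)start the pause queue send timer
 *
 * Return: NULL if all frames were queued, else the frames that did not fit
 */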
static qdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
			      qdf_nbuf_t msdu_list, uint8_t start_timer)
{
	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	while (msdu_list &&
	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
					     QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
		DPTRACE(qdf_dp_trace(msdu_list,
			QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
			(uint8_t *)(qdf_nbuf_data(msdu_list)),
			sizeof(qdf_nbuf_data(msdu_list))));

		vdev->ll_pause.txq.depth++;
		if (!vdev->ll_pause.txq.head) {
			vdev->ll_pause.txq.head = msdu_list;
			vdev->ll_pause.txq.tail = msdu_list;
		} else {
			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
		}
		vdev->ll_pause.txq.tail = msdu_list;

		msdu_list = next;
	}
	if (vdev->ll_pause.txq.tail)
		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

	if (start_timer) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		qdf_timer_start(&vdev->ll_pause.timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
	}
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return msdu_list;
}

/*
 * Store up the tx frame in the vdev's tx queue if the vdev is paused.
 * If there are too many frames in the tx queue, reject it.
 */
qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	uint16_t eth_type;
	uint32_t paused_reason;

	if (msdu_list == NULL)
		return NULL;

	paused_reason = vdev->ll_pause.paused_reason;
	if (paused_reason) {
		if (qdf_unlikely((paused_reason &
				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
				 paused_reason)) {
			eth_type = (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->
				    ethertype[0] << 8) |
				   (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->ethertype[1]);
			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
				return msdu_list;
			}
		}
		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
	} else {
		if (vdev->ll_pause.txq.depth > 0 ||
		    vdev->pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			/* not paused, but there is a backlog of frames
			   from a prior pause or throttle off phase */
			msdu_list = ol_tx_vdev_pause_queue_append(
				vdev, msdu_list, 0);
			/* if throttle is disabled or phase is "on",
			   send the frame */
			if (vdev->pdev->tx_throttle.current_throttle_level ==
			    THROTTLE_LEVEL_0 ||
			    vdev->pdev->tx_throttle.current_throttle_phase ==
			    THROTTLE_PHASE_ON) {
				/* send as many frames as possible
				   from the vdev's backlog */
				ol_tx_vdev_ll_pause_queue_send_base(vdev);
			}
		} else {
			/* not paused, no throttle and no backlog -
			   send the new frames */
			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
		}
	}
	return msdu_list;
}

/*
 * Run through the transmit queues for all the vdevs and
 * send the pending frames
 */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
	int max_to_send;        /* tracks how many frames can still be sent */
	qdf_nbuf_t tx_msdu;
	struct ol_txrx_vdev_t *vdev = NULL;
	uint8_t more;

	if (NULL == pdev)
		return;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;

	/* ensure that we send no more than tx_threshold frames at once */
	max_to_send = pdev->tx_throttle.tx_threshold;

	/* round robin through the vdev queues for the given pdev */

	/* Potential improvement: download several frames from the same vdev
	   at a time, since it is more likely that those frames could be
	   aggregated together; remember which vdev was serviced last,
	   so the next call to this function can resume the round-robin
	   traversing where the current invocation left off */
	do {
		more = 0;
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
			if (vdev->ll_pause.txq.depth) {
				if (vdev->ll_pause.paused_reason) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				tx_msdu = vdev->ll_pause.txq.head;
				if (NULL == tx_msdu) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				max_to_send--;
				vdev->ll_pause.txq.depth--;

				vdev->ll_pause.txq.head =
					qdf_nbuf_next(tx_msdu);

				if (NULL == vdev->ll_pause.txq.head)
					vdev->ll_pause.txq.tail = NULL;

				qdf_nbuf_set_next(tx_msdu, NULL);
				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
				/*
				 * It is unexpected that ol_tx_ll would reject
				 * the frame, since we checked that there's
				 * room for it, though there's an infinitesimal
				 * possibility that between the time we checked
				 * the room available and now, a concurrent
				 * batch of tx frames used up all the room.
				 * For simplicity, just drop the frame.
				 */
				if (tx_msdu) {
					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
						       QDF_DMA_TO_DEVICE);
					qdf_nbuf_tx_free(tx_msdu,
							 QDF_NBUF_PKT_ERROR);
				}
			}
			/* check if there are more msdus to transmit */
			if (vdev->ll_pause.txq.depth)
				more = 1;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} while (more && max_to_send);

	vdev = NULL;
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
		if (vdev->ll_pause.txq.depth) {
			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
			qdf_timer_start(
				&pdev->tx_throttle.tx_timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			return;
		}
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
}

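/**
 * ol_tx_vdev_ll_pause_queue_send() - pause queue timer callback
 * @context: the vdev (struct ol_txrx_vdev_t) whose pause queue is drained
 *
 * Skip sending if tx throttling is active and currently in its "off"
 * phase; otherwise drain the vdev's pause queue.
 */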
void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;
	ol_tx_vdev_ll_pause_queue_send_base(vdev);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
{
	return
		tx_spec &
		(ol_tx_spec_raw | ol_tx_spec_no_aggr | ol_tx_spec_no_encrypt);
}

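/**
 * ol_txrx_tx_raw_subtype() - compute the raw-mode subtype bits for a tx spec
 * @tx_spec: the non-standard tx spec flags
 *
 * Return: HTT raw subtype bitmap (802.11 MAC header present, plus the
 *         no-aggregation / no-encryption bits implied by the tx spec)
 */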
static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
{
	uint8_t sub_type = 0x1; /* 802.11 MAC header present */

	if (tx_spec & ol_tx_spec_no_aggr)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
	if (tx_spec & ol_tx_spec_no_encrypt)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	if (tx_spec & ol_tx_spec_nwifi_no_encrypt)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	return sub_type;
}

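/**
 * ol_tx_non_std_ll() - send a list of non-standard tx frames (LL path)
 * @vdev: virtual device transmitting the frames
 * @tx_spec: what non-standard handling the frames need
 * @msdu_list: null-terminated list of tx frames
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */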
qdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
		 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);

		if (tx_spec != ol_tx_spec_std) {
			if (tx_spec & ol_tx_spec_no_free) {
				tx_desc->pkt_type = ol_tx_frm_no_free;
			} else if (tx_spec & ol_tx_spec_tso) {
				tx_desc->pkt_type = ol_tx_frm_tso;
			} else if (tx_spec & ol_tx_spec_nwifi_no_encrypt) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
	do { \
		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
			if (tx_msdu_info.peer) { \
				/* remove the peer reference added above */ \
				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
			} \
			goto MSDU_LOOP_BOTTOM; \
		} \
	} while (0)
#else
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
#endif

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */

/**
 * parse_ocb_tx_header() - Function to check for OCB
 * TX control header on a packet and extract it if present
 *
 * @msdu: Pointer to OS packet (qdf_nbuf_t)
 * @tx_ctrl: Output buffer to receive a copy of the TX control header,
 *           if one is present (may be NULL)
 *
 * Return: true if no TX control header was present or it was parsed and
 *         removed successfully, false if its version is invalid
 */
#define OCB_HEADER_VERSION 1
bool parse_ocb_tx_header(qdf_nbuf_t msdu,
			 struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	struct ether_header *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if TX control header is present */
	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do.. */
		return true;

	/* Remove the ethernet header */
	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		return false;
	}

	/* Remove the TX control header */
	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}

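/**
 * ol_tx_non_std() - send a list of non-standard tx frames
 * @vdev: virtual device transmitting the frames
 * @tx_spec: what non-standard handling the frames need
 * @msdu_list: null-terminated list of tx frames
 *
 * Thin wrapper that forwards to ol_tx_non_std_ll().
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */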
qdf_nbuf_t
ol_tx_non_std(ol_txrx_vdev_handle vdev,
	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
}

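/**
 * ol_txrx_data_tx_cb_set() - store a tx completion callback for
 * non-standard data frames
 * @vdev: virtual device object (the callback is stored in its pdev)
 * @callback: function to invoke when the data frame tx completes
 * @ctxt: context to supply to the callback
 */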
void
ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
		       ol_txrx_data_tx_cb callback, void *ctxt)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	pdev->tx_data_callback.func = callback;
	pdev->tx_data_callback.ctxt = ctxt;
}

/**
 * ol_txrx_mgmt_tx_cb_set() - Store a callback for delivery
 * notifications for management frames.
 *
 * @pdev - the data physical device object
 * @type - the type of mgmt frame the callback is used for
 * @download_cb - the callback for notification of delivery to the target
 * @ota_ack_cb - the callback for notification of delivery to the peer
 * @ctxt - context to use with the callback
 *
 * When the txrx SW receives notifications from the target that a tx frame
 * has been delivered to its recipient, it will check if the tx frame
 * is a management frame. If so, the txrx SW will check the management
 * frame type specified when the frame was submitted for transmission.
 * If there is a callback function registered for the type of management
 * frame in question, the txrx code will invoke the callback to inform
 * the management + control SW that the mgmt frame was delivered.
 * This function is used by the control SW to store a callback pointer
 * for a given type of management frame.
 */
void
ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
		       uint8_t type,
		       ol_txrx_mgmt_tx_cb download_cb,
		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
	pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
	pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
}

#if defined(HELIUMPLUS_PADDR64)
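/**
 * dump_frag_desc() - print the HTT fragment descriptor of a tx descriptor
 * @msg: caller-supplied debug label
 * @tx_desc: the tx descriptor whose fragment descriptor is dumped
 */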
void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
{
	uint32_t *frag_ptr_i_p;
	int i;

	qdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
		  tx_desc, tx_desc->id);
	qdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%llx",
		  tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
	qdf_print("%s %d: Fragment Descriptor 0x%p (paddr=0x%llx)",
		  __func__, __LINE__, tx_desc->htt_frag_desc,
		  tx_desc->htt_frag_desc_paddr);

	/* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
	   is already de-referrable (=> in virtual address space) */
	frag_ptr_i_p = tx_desc->htt_frag_desc;

	/* Dump 6 words of TSO flags */
	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags: ",
		       DUMP_PREFIX_NONE, 8, 4,
		       frag_ptr_i_p, 24, true);

	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */

	i = 0;
	while (*frag_ptr_i_p) {
		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr: ",
			       DUMP_PREFIX_NONE, 8, 4,
			       frag_ptr_i_p, 8, true);
		i++;
		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
			break;
		else /* jump to next pointer - skip length */
			frag_ptr_i_p += 2;
	}
	return;
}
#endif /* HELIUMPLUS_PADDR64 */

/**
 * ol_txrx_mgmt_send_ext() - Transmit a management frame
 *
 * @vdev - virtual device transmitting the frame
 * @tx_mgmt_frm - management frame to transmit
 * @type - the type of management frame (determines what callback to use)
 * @use_6mbps - specify whether management frame to transmit should
 * use 6 Mbps rather than 1 Mbps min rate (for 5GHz band or P2P)
 * @chanfreq - channel to transmit the frame on
 *
 * Send the specified management frame from the specified virtual device.
 * The type is used for determining whether to invoke a callback to inform
 * the sender that the tx mgmt frame was delivered, and if so, which
 * callback to use.
 *
 * Return: 0 - the frame is accepted for transmission
 *         -EINVAL - the frame was not accepted
 */
int
ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
		      qdf_nbuf_t tx_mgmt_frm,
		      uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_msdu_info_t tx_msdu_info;

	tx_msdu_info.tso_info.is_tso = 0;

	tx_msdu_info.htt.action.use_6mbps = use_6mbps;
	tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
	tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
	tx_msdu_info.htt.action.do_tx_complete =
		pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;

	/*
	 * FIX THIS: l2_hdr_type should only specify L2 header type
	 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
	 * that is a combination of L2 header type and 802.11 frame type.
	 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
	 * But if the 802.11 frame type is "data", then the HTT pkt type is
	 * the L2 header type (more or less): 802.3 vs. Native WiFi
	 * (basic 802.11).
	 * (Or the header type can be "raw", which is any version of the 802.11
	 * header, and also implies that some of the offloaded tx data
	 * processing steps may not apply.)
	 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
	 * l2_hdr_type field to program the HTT pkt type. Thus, this txrx SW
	 * needs to overload the l2_hdr_type to indicate whether the frame is
	 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
	 * To fix this, the msdu_info's l2_hdr_type should be left specifying
	 * just the L2 header type. For mgmt frames, there should be a
	 * separate function to patch the HTT pkt type to store a "mgmt" value
	 * rather than the L2 header type. Then the HTT pkt type can be
	 * programmed efficiently for data frames, and the msdu_info's
	 * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
	 * frame type rather than the L2 header type.
	 */
	/*
	 * FIX THIS: remove duplication of htt_frm_type_mgmt and
	 * htt_pkt_type_mgmt
	 * The htt module expects a "enum htt_pkt_type" value.
	 * The htt_dxe module expects a "enum htt_frm_type" value.
	 * This needs to be cleaned up, so both versions of htt use a
	 * consistent method of specifying the frame type.
	 */
#ifdef QCA_SUPPORT_INTEGRATED_SOC
	/* tx mgmt frames always come with a 802.11 header */
	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
	tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
#else
	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
	tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
#endif

	tx_msdu_info.peer = NULL;

	qdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm, QDF_DMA_TO_DEVICE);
	/* For LL tx_comp_req is not used so initialized to 0 */
	tx_msdu_info.htt.action.tx_comp_req = 0;
	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
	/* FIX THIS -
	 * The FW currently has trouble using the host's fragments table
	 * for management frames. Until this is fixed, rather than
	 * specifying the fragment table to the FW, specify just the
	 * address of the initial fragment.
	 */
#if defined(HELIUMPLUS_PADDR64)
	/* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
	   tx_desc); */
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (tx_desc) {
		/*
		 * Following the call to ol_tx_desc_ll, frag 0 is the
		 * HTT tx HW descriptor, and the frame payload is in
		 * frag 1.
		 */
		htt_tx_desc_frags_table_set(
			pdev->htt_pdev,
			tx_desc->htt_tx_desc,
			qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
			0, 0);
#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
		dump_frag_desc(
			"after htt_tx_desc_frags_table_set",
			tx_desc);
#endif /* defined(HELIUMPLUS_PADDR64) */
	}
	if (!tx_desc) {
		qdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
				      QDF_DMA_TO_DEVICE);
		return -EINVAL; /* can't accept the tx mgmt frame */
	}
	TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;

	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
		QDF_NBUF_TX_PKT_MGMT_TRACK;
	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
			  htt_pkt_type_mgmt);

	return 0; /* accepted the tx mgmt frame */
}

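/**
 * ol_txrx_sync() - send a sync message to the target
 * @pdev: the data physical device object
 * @sync_cnt: sync count to carry in the HTT host-to-target sync message
 */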
void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
{
	htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
}

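/**
 * ol_tx_reinject() - re-send a tx frame to a specific peer
 * @vdev: virtual device transmitting the frame
 * @msdu: the tx frame to reinject
 * @peer_id: peer ID to stamp into the HTT tx descriptor
 *
 * Mark the HTT tx descriptor as postponed, set the destination peer id,
 * and send the frame again.
 *
 * Return: NULL if the frame was accepted, else the unaccepted frame
 */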
qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
			  qdf_nbuf_t msdu, uint16_t peer_id)
{
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
	msdu_info.peer = NULL;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;

	ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *)(tx_desc->htt_tx_desc)), true);

	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);

	ol_tx_send(vdev->pdev, tx_desc, msdu);

	return NULL;
}

#if defined(FEATURE_TSO)
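/**
 * ol_tso_seg_list_init() - allocate the pdev's pool of TSO segment elements
 * @pdev: the data physical device object
 * @num_seg: number of qdf_tso_seg_elem_t elements to pre-allocate
 */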
void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;

	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
	pdev->tso_seg_pool.freelist = c_element;
	for (i = 0; i < (num_seg - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		c_element = c_element->next;
		c_element->next = NULL;
	}
	pdev->tso_seg_pool.pool_size = num_seg;
	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
}

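/**
 * ol_tso_seg_list_deinit() - free the pdev's pool of TSO segment elements
 * @pdev: the data physical device object
 *
 * Free every element on the TSO segment freelist and destroy the
 * pool's spinlock.
 */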
void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	c_element = pdev->tso_seg_pool.freelist;
	for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	pdev->tso_seg_pool.freelist = NULL;
	pdev->tso_seg_pool.num_free = 0;
	pdev->tso_seg_pool.pool_size = 0;
	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
	qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
}
#endif /* FEATURE_TSO */