1/*
2 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/* OS abstraction libraries */
29#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
30#include <cdf_atomic.h> /* cdf_atomic_read, etc. */
31#include <cdf_util.h> /* cdf_unlikely */
32
33/* APIs for other modules */
34#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
35#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
36#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
37#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
38
39/* internal header files relevant for all systems */
40#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
41#include <ol_txrx_types.h> /* pdev stats */
42#include <ol_tx_desc.h> /* ol_tx_desc */
43#include <ol_tx_send.h> /* ol_tx_send */
44#include <ol_txrx.h>
45
46/* internal header files relevant only for HL systems */
47#include <ol_tx_queue.h> /* ol_tx_enqueue */
48
49/* internal header files relevant only for specific systems (Pronto) */
50#include <ol_txrx_encap.h> /* OL_TX_ENCAP, etc */
51#include <ol_tx.h>
52
53#ifdef WLAN_FEATURE_FASTPATH
54#include <hif.h> /* HIF_DEVICE */
55#include <htc_api.h> /* Layering violation, but required for fast path */
56#include <htt_internal.h>
57#include <htt_types.h> /* htc_endpoint */
58
59int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
60 unsigned int num_msdus, unsigned int transfer_id);
61#endif /* WLAN_FEATURE_FASTPATH */
62
63/*
64 * The TXRX module doesn't accept tx frames unless the target has
65 * enough descriptors for them.
66 * For LL, the TXRX descriptor pool is sized to match the target's
67 * descriptor pool. Hence, if the descriptor allocation in TXRX
68 * succeeds, that guarantees that the target has room to accept
69 * the new tx frame.
70 */
71#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
72 do { \
73 struct ol_txrx_pdev_t *pdev = vdev->pdev; \
74 (msdu_info)->htt.info.frame_type = pdev->htt_pkt_type; \
75 tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info); \
76 if (cdf_unlikely(!tx_desc)) { \
77 TXRX_STATS_MSDU_LIST_INCR( \
78 pdev, tx.dropped.host_reject, msdu); \
79 return msdu; /* the list of unaccepted MSDUs */ \
80 } \
81 } while (0)
82
83#if defined(FEATURE_TSO)
84/**
85 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
86 * related information in the msdu_info meta data
87 * @vdev: virtual device handle
88 * @msdu: network buffer
89 * @msdu_info: meta data associated with the msdu
90 *
91 * Return: 0 - success, >0 - error
92 */
93static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
94 cdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
95{
96 msdu_info->tso_info.curr_seg = NULL;
97 if (cdf_nbuf_is_tso(msdu)) {
98 int num_seg = cdf_nbuf_get_tso_num_seg(msdu);
99 msdu_info->tso_info.tso_seg_list = NULL;
100 msdu_info->tso_info.num_segs = num_seg;
101 while (num_seg) {
102 struct cdf_tso_seg_elem_t *tso_seg =
103 ol_tso_alloc_segment(vdev->pdev);
104 if (tso_seg) {
105 tso_seg->next =
106 msdu_info->tso_info.tso_seg_list;
107 msdu_info->tso_info.tso_seg_list
108 = tso_seg;
109 num_seg--;
110 } else {
111 struct cdf_tso_seg_elem_t *next_seg;
112 struct cdf_tso_seg_elem_t *free_seg =
113 msdu_info->tso_info.tso_seg_list;
114 cdf_print("TSO seg alloc failed!\n");
115 while (free_seg) {
116 next_seg = free_seg->next;
117 ol_tso_free_segment(vdev->pdev,
118 free_seg);
119 free_seg = next_seg;
120 }
121 return 1;
122 }
123 }
124 cdf_nbuf_get_tso_info(vdev->pdev->osdev,
125 msdu, &(msdu_info->tso_info));
126 msdu_info->tso_info.curr_seg =
127 msdu_info->tso_info.tso_seg_list;
128 num_seg = msdu_info->tso_info.num_segs;
129 } else {
130 msdu_info->tso_info.is_tso = 0;
131 msdu_info->tso_info.num_segs = 1;
132 }
133 return 0;
134}
135#endif
136
137/**
138 * ol_tx_send_data_frame() - send data frame
139 * @sta_id: sta id
140 * @skb: skb
141 * @proto_type: proto type
142 *
 143 * Return: NULL on success; skb on failure
144 */
145cdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, cdf_nbuf_t skb,
146 uint8_t proto_type)
147{
148 void *cdf_ctx = cds_get_context(CDF_MODULE_ID_CDF_DEVICE);
149 struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
150 struct ol_txrx_peer_t *peer;
151 cdf_nbuf_t ret;
152 CDF_STATUS status;
153
154 if (cdf_unlikely(!pdev)) {
155		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
156			"%s:pdev is null", __func__);
157 return skb;
158 }
159	if (cdf_unlikely(!cdf_ctx)) {
160 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
161 "%s:cdf_ctx is null", __func__);
162 return skb;
163 }
164
165 if (sta_id >= WLAN_MAX_STA_COUNT) {
166		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
167			"%s:Invalid sta id", __func__);
168 return skb;
169 }
170
171 peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
172 if (!peer) {
173		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
174			"%s:Invalid peer", __func__);
175 return skb;
176 }
177
178 if (peer->state < ol_txrx_peer_state_conn) {
179		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
180			"%s: station not yet registered, dropping pkt", __func__);
181 return skb;
182 }
183
184 status = cdf_nbuf_map_single(cdf_ctx, skb, CDF_DMA_TO_DEVICE);
185 if (cdf_unlikely(status != CDF_STATUS_SUCCESS)) {
186		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
187			"%s: nbuf map failed", __func__);
188 return skb;
189 }
190
191 cdf_nbuf_trace_set_proto_type(skb, proto_type);
192
193 if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
194 && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
195 && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
196 cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
197
198 /* Terminate the (single-element) list of tx frames */
199 cdf_nbuf_set_next(skb, NULL);
200 ret = OL_TX_LL(peer->vdev, skb);
201 if (ret) {
202		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
203			"%s: Failed to tx", __func__);
204 cdf_nbuf_unmap_single(cdf_ctx, ret, CDF_DMA_TO_DEVICE);
205 return ret;
206 }
207
208 return NULL;
209}
210
211#ifdef IPA_OFFLOAD
212/**
213 * ol_tx_send_ipa_data_frame() - send IPA data frame
214 * @vdev: vdev
215 * @skb: skb
216 *
 217 * Return: NULL on success; skb on failure
218 */
219cdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
220 cdf_nbuf_t skb)
221{
222 ol_txrx_pdev_handle pdev = cds_get_context(CDF_MODULE_ID_TXRX);
223 cdf_nbuf_t ret;
224
225 if (cdf_unlikely(!pdev)) {
226 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
227 "%s: pdev is NULL", __func__);
228 return skb;
229 }
230
231 if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
232 && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
233 && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
234 cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
235
236 /* Terminate the (single-element) list of tx frames */
237 cdf_nbuf_set_next(skb, NULL);
238 ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
239 if (ret) {
240 TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
241 "%s: Failed to tx", __func__);
242 return ret;
243 }
244
245 return NULL;
246}
247#endif
248
249
250#if defined(FEATURE_TSO)
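/**
 * ol_tx_ll() - transmit a list of MSDUs on a LL (TSO-enabled) system
 * @vdev: virtual device handle
 * @msdu_list: null-terminated list of network buffers to send
 *
 * For each MSDU, prepare the TSO segments if needed, allocate a tx
 * descriptor per segment, and hand the frame to ol_tx_send().
 *
 * Return: NULL if all MSDUs were accepted, else the list of
 *	unaccepted MSDUs
 */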
251cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
252{
253 cdf_nbuf_t msdu = msdu_list;
254 struct ol_txrx_msdu_info_t msdu_info;
255
256 msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
257 msdu_info.htt.action.tx_comp_req = 0;
258 /*
259 * The msdu_list variable could be used instead of the msdu var,
260 * but just to clarify which operations are done on a single MSDU
261 * vs. a list of MSDUs, use a distinct variable for single MSDUs
262 * within the list.
263 */
264 while (msdu) {
265 cdf_nbuf_t next;
266 struct ol_tx_desc_t *tx_desc;
267 int segments = 1;
268
269 msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
270 msdu_info.peer = NULL;
271
272		if (cdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
273 cdf_print("ol_tx_prepare_tso failed\n");
274 TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
275 tx.dropped.host_reject, msdu);
276 return msdu;
277 }
278
279		segments = msdu_info.tso_info.num_segs;
280
281 /*
282 * The netbuf may get linked into a different list inside the
283 * ol_tx_send function, so store the next pointer before the
284 * tx_send call.
285 */
286 next = cdf_nbuf_next(msdu);
287 /* init the current segment to the 1st segment in the list */
288 while (segments) {
289
290 if (msdu_info.tso_info.curr_seg)
291				NBUF_CB_PADDR(msdu) =
292 msdu_info.tso_info.curr_seg->
293 seg.tso_frags[0].paddr_low_32;
294
295 segments--;
296
297 /**
298 * if this is a jumbo nbuf, then increment the number
299 * of nbuf users for each additional segment of the msdu.
300 * This will ensure that the skb is freed only after
301 * receiving tx completion for all segments of an nbuf
302 */
303 if (segments)
304 cdf_nbuf_inc_users(msdu);
305
306 ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
307
308 /*
309 * If debug display is enabled, show the meta-data being
310 * downloaded to the target via the HTT tx descriptor.
311 */
312 htt_tx_desc_display(tx_desc->htt_tx_desc);
313
314 ol_tx_send(vdev->pdev, tx_desc, msdu);
315
316 if (msdu_info.tso_info.curr_seg) {
317 msdu_info.tso_info.curr_seg =
318 msdu_info.tso_info.curr_seg->next;
319 }
320
321			cdf_nbuf_reset_num_frags(msdu);
322
323 if (msdu_info.tso_info.is_tso) {
324 TXRX_STATS_TSO_INC_SEG(vdev->pdev);
325 TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
326 }
327 } /* while segments */
328
329 msdu = next;
330 if (msdu_info.tso_info.is_tso) {
331 TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
332 TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
333 }
334 } /* while msdus */
335 return NULL; /* all MSDUs were accepted */
336}
337#else /* TSO */
338
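/**
 * ol_tx_ll() - transmit a list of MSDUs on a LL (non-TSO) system
 * @vdev: virtual device handle
 * @msdu_list: null-terminated list of network buffers to send
 *
 * Return: NULL if all MSDUs were accepted, else the list of
 *	unaccepted MSDUs
 */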
339cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
340{
341 cdf_nbuf_t msdu = msdu_list;
342 struct ol_txrx_msdu_info_t msdu_info;
343
344 msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
345 msdu_info.htt.action.tx_comp_req = 0;
346 msdu_info.tso_info.is_tso = 0;
347 /*
348 * The msdu_list variable could be used instead of the msdu var,
349 * but just to clarify which operations are done on a single MSDU
350 * vs. a list of MSDUs, use a distinct variable for single MSDUs
351 * within the list.
352 */
353 while (msdu) {
354 cdf_nbuf_t next;
355 struct ol_tx_desc_t *tx_desc;
356
357 msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
358 msdu_info.peer = NULL;
359 ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
360
361 /*
362 * If debug display is enabled, show the meta-data being
363 * downloaded to the target via the HTT tx descriptor.
364 */
365 htt_tx_desc_display(tx_desc->htt_tx_desc);
366 /*
367 * The netbuf may get linked into a different list inside the
368 * ol_tx_send function, so store the next pointer before the
369 * tx_send call.
370 */
371 next = cdf_nbuf_next(msdu);
372 ol_tx_send(vdev->pdev, tx_desc, msdu);
373 msdu = next;
374 }
375 return NULL; /* all MSDUs were accepted */
376}
377#endif /* TSO */
378
379#ifdef WLAN_FEATURE_FASTPATH
380/**
 381 * ol_tx_prepare_ll_fast() - Alloc and prepare Tx descriptor
 382 *
 383 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 384 * information.
385 *
386 * @pdev: pointer to ol pdev handle
387 * @vdev: pointer to ol vdev handle
388 * @msdu: linked list of msdu packets
389 * @pkt_download_len: packet download length
390 * @ep_id: endpoint ID
391 * @msdu_info: Handle to msdu_info
392 *
393 * Return: Pointer to Tx descriptor
394 */
395static inline struct ol_tx_desc_t *
396ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
397 ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu,
398 uint32_t pkt_download_len, uint32_t ep_id,
399 struct ol_txrx_msdu_info_t *msdu_info)
400{
401 struct ol_tx_desc_t *tx_desc = NULL;
402 uint32_t *htt_tx_desc;
403 void *htc_hdr_vaddr;
404 u_int32_t num_frags, i;
405
406 tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
407 if (cdf_unlikely(!tx_desc))
408 return NULL;
409
410 tx_desc->netbuf = msdu;
411 if (msdu_info->tso_info.is_tso) {
412 tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
413 tx_desc->pkt_type = ol_tx_frm_tso;
414 TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
415 } else {
416 tx_desc->pkt_type = ol_tx_frm_std;
417 }
418
419 htt_tx_desc = tx_desc->htt_tx_desc;
420
421 /* Make sure frags num is set to 0 */
422 /*
423 * Do this here rather than in hardstart, so
424 * that we can hopefully take only one cache-miss while
425 * accessing skb->cb.
426 */
427
428 /* HTT Header */
429 /* TODO : Take care of multiple fragments */
430
431 /* TODO: Precompute and store paddr in ol_tx_desc_t */
432 /* Virtual address of the HTT/HTC header, added by driver */
433 htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
434 htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
435 tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
436 &msdu_info->htt, &msdu_info->tso_info,
437 NULL, vdev->opmode == wlan_op_mode_ocb);
438
439 num_frags = cdf_nbuf_get_num_frags(msdu);
 440	/* num_frags is expected to be at most 2 */
 441	num_frags = (num_frags > NBUF_CB_TX_MAX_EXTRA_FRAGS)
 442			? NBUF_CB_TX_MAX_EXTRA_FRAGS
 443			: num_frags;
444#if defined(HELIUMPLUS_PADDR64)
445 /*
446 * Use num_frags - 1, since 1 frag is used to store
447 * the HTT/HTC descriptor
448 * Refer to htt_tx_desc_init()
449 */
450 htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
451 num_frags - 1);
452#else /* ! defined(HELIUMPLUSPADDR64) */
453 htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
454 num_frags-1);
455#endif /* defined(HELIUMPLUS_PADDR64) */
456 if (msdu_info->tso_info.is_tso) {
457 htt_tx_desc_fill_tso_info(pdev->htt_pdev,
458 tx_desc->htt_frag_desc, &msdu_info->tso_info);
459 TXRX_STATS_TSO_SEG_UPDATE(pdev,
460 msdu_info->tso_info.curr_seg->seg);
461 } else {
462 for (i = 1; i < num_frags; i++) {
463 cdf_size_t frag_len;
 464			cdf_dma_addr_t frag_paddr;
 465
466 frag_len = cdf_nbuf_get_frag_len(msdu, i);
 467			frag_paddr = cdf_nbuf_get_frag_paddr(msdu, i);
 468#if defined(HELIUMPLUS_PADDR64)
469 htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
470 i - 1, frag_paddr, frag_len);
471#if defined(HELIUMPLUS_DEBUG)
 472			cdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
 473				  __func__, __LINE__, tx_desc->htt_frag_desc,
 474				  i-1, frag_paddr, frag_len);
 475			dump_pkt(netbuf, frag_paddr, 64);
476#endif /* HELIUMPLUS_DEBUG */
477#else /* ! defined(HELIUMPLUSPADDR64) */
478 htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
479 i - 1, frag_paddr, frag_len);
480#endif /* defined(HELIUMPLUS_PADDR64) */
481 }
482 }
483
484 /*
485 * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
486 * this is not required. We still have to mark the swap bit correctly,
487 * when posting to the ring
488 */
489 /* Check to make sure, data download length is correct */
490
 491	/*
 492	 * TODO: Can we remove this check and always download a fixed length?
 493	 */
494 if (cdf_unlikely(cdf_nbuf_len(msdu) < pkt_download_len))
495 pkt_download_len = cdf_nbuf_len(msdu);
496
497 /* Fill the HTC header information */
498 /*
499 * Passing 0 as the seq_no field, we can probably get away
500 * with it for the time being, since this is not checked in f/w
501 */
502 /* TODO : Prefill this, look at multi-fragment case */
503 HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);
504
505 return tx_desc;
506}
507#if defined(FEATURE_TSO)
508/**
 509 * ol_tx_ll_fast() - Update metadata information and send msdu to HIF/CE
510 *
511 * @vdev: handle to ol_txrx_vdev_t
512 * @msdu_list: msdu list to be sent out.
513 *
514 * Return: on success return NULL, pointer to nbuf when it fails to send.
515 */
516cdf_nbuf_t
517ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
518{
519 cdf_nbuf_t msdu = msdu_list;
520 struct ol_txrx_pdev_t *pdev = vdev->pdev;
521 uint32_t pkt_download_len =
522 ((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
523 uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
524 struct ol_txrx_msdu_info_t msdu_info;
525
526 msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
527 msdu_info.htt.action.tx_comp_req = 0;
528 /*
529 * The msdu_list variable could be used instead of the msdu var,
530 * but just to clarify which operations are done on a single MSDU
531 * vs. a list of MSDUs, use a distinct variable for single MSDUs
532 * within the list.
533 */
534 while (msdu) {
535 cdf_nbuf_t next;
536 struct ol_tx_desc_t *tx_desc;
537 int segments = 1;
538
539 msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
540 msdu_info.peer = NULL;
541
542		if (cdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
543 cdf_print("ol_tx_prepare_tso failed\n");
544 TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
545 tx.dropped.host_reject, msdu);
546 return msdu;
547 }
548
549		segments = msdu_info.tso_info.num_segs;
550
551 /*
552 * The netbuf may get linked into a different list
553 * inside the ce_send_fast function, so store the next
554 * pointer before the ce_send call.
555 */
556 next = cdf_nbuf_next(msdu);
557 /* init the current segment to the 1st segment in the list */
558 while (segments) {
559
560 if (msdu_info.tso_info.curr_seg)
561				NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
562					curr_seg->seg.tso_frags[0].paddr_low_32;
563
564 segments--;
565
566 /**
567 * if this is a jumbo nbuf, then increment the number
568 * of nbuf users for each additional segment of the msdu.
569 * This will ensure that the skb is freed only after
570 * receiving tx completion for all segments of an nbuf
571 */
572 if (segments)
573 cdf_nbuf_inc_users(msdu);
574
575 msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
576 msdu_info.htt.info.vdev_id = vdev->vdev_id;
577 msdu_info.htt.action.cksum_offload =
578 cdf_nbuf_get_tx_cksum(msdu);
579 switch (cdf_nbuf_get_exemption_type(msdu)) {
580 case CDF_NBUF_EXEMPT_NO_EXEMPTION:
581 case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
582 /* We want to encrypt this frame */
583 msdu_info.htt.action.do_encrypt = 1;
584 break;
585 case CDF_NBUF_EXEMPT_ALWAYS:
586 /* We don't want to encrypt this frame */
587 msdu_info.htt.action.do_encrypt = 0;
588 break;
589 default:
590 msdu_info.htt.action.do_encrypt = 1;
591 cdf_assert(0);
592 break;
593 }
594
595 tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
596 pkt_download_len, ep_id,
597 &msdu_info);
598
599 if (cdf_likely(tx_desc)) {
600 /*
601 * If debug display is enabled, show the meta
602 * data being downloaded to the target via the
603 * HTT tx descriptor.
604 */
605 htt_tx_desc_display(tx_desc->htt_tx_desc);
606 if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu,
607 1, ep_id))) {
608 /*
609 * The packet could not be sent.
610 * Free the descriptor, return the
611 * packet to the caller.
612 */
613 ol_tx_desc_free(pdev, tx_desc);
614 return msdu;
615 }
616 if (msdu_info.tso_info.curr_seg) {
617 msdu_info.tso_info.curr_seg =
618 msdu_info.tso_info.curr_seg->next;
619 }
620
621 if (msdu_info.tso_info.is_tso) {
622					cdf_nbuf_reset_num_frags(msdu);
623					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
624 TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
625 }
626 } else {
627 TXRX_STATS_MSDU_LIST_INCR(
628 pdev, tx.dropped.host_reject, msdu);
629 /* the list of unaccepted MSDUs */
630 return msdu;
631 }
632 } /* while segments */
633
634 msdu = next;
635 if (msdu_info.tso_info.is_tso) {
636 TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
637 TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
638 }
639 } /* while msdus */
640 return NULL; /* all MSDUs were accepted */
641}
642#else
643cdf_nbuf_t
644ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
645{
646 cdf_nbuf_t msdu = msdu_list;
647 struct ol_txrx_pdev_t *pdev = vdev->pdev;
648 uint32_t pkt_download_len =
649 ((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
650 uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
651 struct ol_txrx_msdu_info_t msdu_info;
652
653 msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
654 msdu_info.htt.action.tx_comp_req = 0;
655 msdu_info.tso_info.is_tso = 0;
656 /*
657 * The msdu_list variable could be used instead of the msdu var,
658 * but just to clarify which operations are done on a single MSDU
659 * vs. a list of MSDUs, use a distinct variable for single MSDUs
660 * within the list.
661 */
662 while (msdu) {
663 cdf_nbuf_t next;
664 struct ol_tx_desc_t *tx_desc;
665
666 msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
667 msdu_info.peer = NULL;
668
669 msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
670 msdu_info.htt.info.vdev_id = vdev->vdev_id;
671 msdu_info.htt.action.cksum_offload =
672 cdf_nbuf_get_tx_cksum(msdu);
673 switch (cdf_nbuf_get_exemption_type(msdu)) {
674 case CDF_NBUF_EXEMPT_NO_EXEMPTION:
675 case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
676 /* We want to encrypt this frame */
677 msdu_info.htt.action.do_encrypt = 1;
678 break;
679 case CDF_NBUF_EXEMPT_ALWAYS:
680 /* We don't want to encrypt this frame */
681 msdu_info.htt.action.do_encrypt = 0;
682 break;
683 default:
684 msdu_info.htt.action.do_encrypt = 1;
685 cdf_assert(0);
686 break;
687 }
688
689 tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
690 pkt_download_len, ep_id,
691 &msdu_info);
692
693 if (cdf_likely(tx_desc)) {
694 /*
695 * If debug display is enabled, show the meta-data being
696 * downloaded to the target via the HTT tx descriptor.
697 */
698 htt_tx_desc_display(tx_desc->htt_tx_desc);
699 /*
700 * The netbuf may get linked into a different list
701 * inside the ce_send_fast function, so store the next
702 * pointer before the ce_send call.
703 */
704 next = cdf_nbuf_next(msdu);
705 if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu, 1,
706 ep_id))) {
707 /* The packet could not be sent */
708 /* Free the descriptor, return the packet to the
709 * caller */
710 ol_tx_desc_free(pdev, tx_desc);
711 return msdu;
712 }
713 msdu = next;
714 } else {
715 TXRX_STATS_MSDU_LIST_INCR(
716 pdev, tx.dropped.host_reject, msdu);
717 return msdu; /* the list of unaccepted MSDUs */
718 }
719 }
720
721 return NULL; /* all MSDUs were accepted */
722}
723#endif /* FEATURE_TSO */
724#endif /* WLAN_FEATURE_FASTPATH */
725
726#ifdef WLAN_FEATURE_FASTPATH
727/**
 728 * ol_tx_ll_wrapper() - send msdus via the fast path if enabled, else via ol_tx_ll
729 *
730 */
731static inline cdf_nbuf_t
732ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
733{
734	struct hif_opaque_softc *hif_device =
735 (struct hif_opaque_softc *)cds_get_context(CDF_MODULE_ID_HIF);
736
737	if (cdf_likely(hif_device && hif_is_fastpath_mode_enabled(hif_device)))
738		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
739 else
740 msdu_list = ol_tx_ll(vdev, msdu_list);
741
742 return msdu_list;
743}
744#else
745static inline cdf_nbuf_t
746ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
747{
748 return ol_tx_ll(vdev, msdu_list);
749}
750#endif /* WLAN_FEATURE_FASTPATH */
751
752#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
753
754#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
755#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
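/**
 * ol_tx_vdev_ll_pause_queue_send_base() - flush a vdev's pause queue
 * @vdev: virtual device whose backlog is to be drained
 *
 * Send as many queued frames as the available tx descriptors allow,
 * keeping OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN descriptors in reserve.
 * Returns immediately if the vdev is still paused; if a backlog
 * remains after sending, re-arm the pause-queue timer.
 */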
756static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
757{
758 int max_to_accept;
759
760 cdf_spin_lock_bh(&vdev->ll_pause.mutex);
761 if (vdev->ll_pause.paused_reason) {
762 cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
763 return;
764 }
765
766 /*
767 * Send as much of the backlog as possible, but leave some margin
768 * of unallocated tx descriptors that can be used for new frames
769 * being transmitted by other vdevs.
770 * Ideally there would be a scheduler, which would not only leave
771 * some margin for new frames for other vdevs, but also would
772 * fairly apportion the tx descriptors between multiple vdevs that
773 * have backlogs in their pause queues.
774 * However, the fairness benefit of having a scheduler for frames
775 * from multiple vdev's pause queues is not sufficient to outweigh
776 * the extra complexity.
777 */
778 max_to_accept = vdev->pdev->tx_desc.num_free -
779 OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
780 while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
781 cdf_nbuf_t tx_msdu;
782 max_to_accept--;
783 vdev->ll_pause.txq.depth--;
784 tx_msdu = vdev->ll_pause.txq.head;
785 if (tx_msdu) {
786 vdev->ll_pause.txq.head = cdf_nbuf_next(tx_msdu);
787 if (NULL == vdev->ll_pause.txq.head)
788 vdev->ll_pause.txq.tail = NULL;
789 cdf_nbuf_set_next(tx_msdu, NULL);
790 NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
791 NBUF_TX_PKT_TXRX_DEQUEUE);
792 tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
793 /*
794 * It is unexpected that ol_tx_ll would reject the frame
795 * since we checked that there's room for it, though
796 * there's an infinitesimal possibility that between the
797 * time we checked the room available and now, a
798 * concurrent batch of tx frames used up all the room.
799 * For simplicity, just drop the frame.
800 */
801 if (tx_msdu) {
802 cdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
803 CDF_DMA_TO_DEVICE);
804 cdf_nbuf_tx_free(tx_msdu, NBUF_PKT_ERROR);
805 }
806 }
807 }
808 if (vdev->ll_pause.txq.depth) {
809 cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
810 cdf_softirq_timer_start(&vdev->ll_pause.timer,
811 OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
812 vdev->ll_pause.is_q_timer_on = true;
813 if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
814 vdev->ll_pause.q_overflow_cnt++;
815 }
816
817 cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
818}
819
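/**
 * ol_tx_vdev_pause_queue_append() - queue frames while the vdev is paused
 * @vdev: virtual device handle
 * @msdu_list: list of frames to enqueue
 * @start_timer: whether to (re)start the pause-queue flush timer
 *
 * Return: NULL if all frames were queued, else the frames that did not
 *	fit within the queue depth limit
 */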
820static cdf_nbuf_t
821ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
822 cdf_nbuf_t msdu_list, uint8_t start_timer)
823{
824 cdf_spin_lock_bh(&vdev->ll_pause.mutex);
825 while (msdu_list &&
826 vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
827 cdf_nbuf_t next = cdf_nbuf_next(msdu_list);
828 NBUF_UPDATE_TX_PKT_COUNT(msdu_list, NBUF_TX_PKT_TXRX_ENQUEUE);
829 DPTRACE(cdf_dp_trace(msdu_list,
830 CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
831 (uint8_t *)(cdf_nbuf_data(msdu_list)),
832 sizeof(cdf_nbuf_data(msdu_list))));
833
834 vdev->ll_pause.txq.depth++;
835 if (!vdev->ll_pause.txq.head) {
836 vdev->ll_pause.txq.head = msdu_list;
837 vdev->ll_pause.txq.tail = msdu_list;
838 } else {
839 cdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
840 }
841 vdev->ll_pause.txq.tail = msdu_list;
842
843 msdu_list = next;
844 }
845 if (vdev->ll_pause.txq.tail)
846 cdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);
847
848 if (start_timer) {
849 cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
850 cdf_softirq_timer_start(&vdev->ll_pause.timer,
851 OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
852 vdev->ll_pause.is_q_timer_on = true;
853 }
854 cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
855
856 return msdu_list;
857}
858
859/*
860 * Store up the tx frame in the vdev's tx queue if the vdev is paused.
861 * If there are too many frames in the tx queue, reject it.
862 */
863cdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
864{
865 uint16_t eth_type;
866 uint32_t paused_reason;
867
868 if (msdu_list == NULL)
869 return NULL;
870
871 paused_reason = vdev->ll_pause.paused_reason;
872 if (paused_reason) {
873 if (cdf_unlikely((paused_reason &
874 OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
875 paused_reason)) {
876 eth_type = (((struct ethernet_hdr_t *)
877 cdf_nbuf_data(msdu_list))->
878 ethertype[0] << 8) |
879 (((struct ethernet_hdr_t *)
880 cdf_nbuf_data(msdu_list))->ethertype[1]);
881 if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
882 msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
883 return msdu_list;
884 }
885 }
886 msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
887 } else {
888 if (vdev->ll_pause.txq.depth > 0 ||
889 vdev->pdev->tx_throttle.current_throttle_level !=
890 THROTTLE_LEVEL_0) {
 891			/* not paused, but there is a backlog of frames
892 from a prior pause or throttle off phase */
893 msdu_list = ol_tx_vdev_pause_queue_append(
894 vdev, msdu_list, 0);
895 /* if throttle is disabled or phase is "on",
896 send the frame */
897 if (vdev->pdev->tx_throttle.current_throttle_level ==
898 THROTTLE_LEVEL_0 ||
899 vdev->pdev->tx_throttle.current_throttle_phase ==
900 THROTTLE_PHASE_ON) {
901 /* send as many frames as possible
902 from the vdevs backlog */
903 ol_tx_vdev_ll_pause_queue_send_base(vdev);
904 }
905 } else {
906 /* not paused, no throttle and no backlog -
907 send the new frames */
908 msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
909 }
910 }
911 return msdu_list;
912}
913
914/*
915 * Run through the transmit queues for all the vdevs and
916 * send the pending frames
917 */
918void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
919{
920 int max_to_send; /* tracks how many frames have been sent */
921 cdf_nbuf_t tx_msdu;
922 struct ol_txrx_vdev_t *vdev = NULL;
923 uint8_t more;
924
925 if (NULL == pdev)
926 return;
927
928 if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
929 return;
930
931 /* ensure that we send no more than tx_threshold frames at once */
932 max_to_send = pdev->tx_throttle.tx_threshold;
933
934 /* round robin through the vdev queues for the given pdev */
935
936 /* Potential improvement: download several frames from the same vdev
937 at a time, since it is more likely that those frames could be
938 aggregated together, remember which vdev was serviced last,
939 so the next call this function can resume the round-robin
940 traversing where the current invocation left off */
941 do {
942 more = 0;
943 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
944
945 cdf_spin_lock_bh(&vdev->ll_pause.mutex);
946 if (vdev->ll_pause.txq.depth) {
947 if (vdev->ll_pause.paused_reason) {
948 cdf_spin_unlock_bh(&vdev->ll_pause.
949 mutex);
950 continue;
951 }
952
953 tx_msdu = vdev->ll_pause.txq.head;
954 if (NULL == tx_msdu) {
955 cdf_spin_unlock_bh(&vdev->ll_pause.
956 mutex);
957 continue;
958 }
959
960 max_to_send--;
961 vdev->ll_pause.txq.depth--;
962
963 vdev->ll_pause.txq.head =
964 cdf_nbuf_next(tx_msdu);
965
966 if (NULL == vdev->ll_pause.txq.head)
967 vdev->ll_pause.txq.tail = NULL;
968
969 cdf_nbuf_set_next(tx_msdu, NULL);
970 tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
971 /*
972 * It is unexpected that ol_tx_ll would reject
973 * the frame, since we checked that there's
974 * room for it, though there's an infinitesimal
975 * possibility that between the time we checked
976 * the room available and now, a concurrent
977 * batch of tx frames used up all the room.
978 * For simplicity, just drop the frame.
979 */
980 if (tx_msdu) {
981 cdf_nbuf_unmap(pdev->osdev, tx_msdu,
982 CDF_DMA_TO_DEVICE);
983 cdf_nbuf_tx_free(tx_msdu,
984 NBUF_PKT_ERROR);
985 }
986 }
 987			/* check if there are more msdus to transmit */
988 if (vdev->ll_pause.txq.depth)
989 more = 1;
990 cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
991 }
992 } while (more && max_to_send);
993
994 vdev = NULL;
995 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
996 cdf_spin_lock_bh(&vdev->ll_pause.mutex);
997 if (vdev->ll_pause.txq.depth) {
998 cdf_softirq_timer_cancel(&pdev->tx_throttle.tx_timer);
999 cdf_softirq_timer_start(
1000 &pdev->tx_throttle.tx_timer,
1001 OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
1002 cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1003 return;
1004 }
1005 cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
1006 }
1007}
1008
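/**
 * ol_tx_vdev_ll_pause_queue_send() - pause-queue flush timer callback
 * @context: the ol_txrx_vdev_t whose pause queue should be flushed
 */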
1009void ol_tx_vdev_ll_pause_queue_send(void *context)
1010{
1011 struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
1012 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1013
1014 if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
1015 pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
1016 return;
1017 ol_tx_vdev_ll_pause_queue_send_base(vdev);
1018}
1019#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
1020
1021static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
1022{
1023 return
1024 tx_spec &
1025 (ol_tx_spec_raw | ol_tx_spec_no_aggr | ol_tx_spec_no_encrypt);
1026}
1027
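/**
 * ol_txrx_tx_raw_subtype() - build the HTT raw-frame subtype flags
 * @tx_spec: non-standard tx spec flags requested by the caller
 *
 * Return: subtype bitmap with the 802.11-header-present bit set, plus
 *	the no-aggregation / no-encryption bits as requested
 */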
1028static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
1029{
1030 uint8_t sub_type = 0x1; /* 802.11 MAC header present */
1031
1032 if (tx_spec & ol_tx_spec_no_aggr)
1033 sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
1034 if (tx_spec & ol_tx_spec_no_encrypt)
1035 sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
1036 if (tx_spec & ol_tx_spec_nwifi_no_encrypt)
1037 sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
1038 return sub_type;
1039}
1040
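/**
 * ol_tx_non_std_ll() - send a list of non-standard frames (LL path)
 * @vdev: virtual device handle
 * @tx_spec: what non-standard handling the frames need
 * @msdu_list: null-terminated list of frames to send
 *
 * Return: NULL if all MSDUs were accepted, else the list of
 *	unaccepted MSDUs
 */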
1041cdf_nbuf_t
1042ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
1043 enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
1044{
1045 cdf_nbuf_t msdu = msdu_list;
1046 htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
1047 struct ol_txrx_msdu_info_t msdu_info;
1048
1049 msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
1050 msdu_info.htt.action.tx_comp_req = 0;
1051
1052 /*
1053 * The msdu_list variable could be used instead of the msdu var,
1054 * but just to clarify which operations are done on a single MSDU
1055 * vs. a list of MSDUs, use a distinct variable for single MSDUs
1056 * within the list.
1057 */
1058 while (msdu) {
1059 cdf_nbuf_t next;
1060 struct ol_tx_desc_t *tx_desc;
1061
1062 msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
1063 msdu_info.peer = NULL;
1064 msdu_info.tso_info.is_tso = 0;
1065
1066 ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
1067
1068 /*
1069 * The netbuf may get linked into a different list inside the
1070 * ol_tx_send function, so store the next pointer before the
1071 * tx_send call.
1072 */
1073 next = cdf_nbuf_next(msdu);
1074
1075 if (tx_spec != ol_tx_spec_std) {
1076 if (tx_spec & ol_tx_spec_no_free) {
1077 tx_desc->pkt_type = ol_tx_frm_no_free;
1078 } else if (tx_spec & ol_tx_spec_tso) {
1079 tx_desc->pkt_type = ol_tx_frm_tso;
1080 } else if (tx_spec & ol_tx_spec_nwifi_no_encrypt) {
1081 uint8_t sub_type =
1082 ol_txrx_tx_raw_subtype(tx_spec);
1083 htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
1084 htt_pkt_type_native_wifi,
1085 sub_type);
1086 } else if (ol_txrx_tx_is_raw(tx_spec)) {
1087 /* different types of raw frames */
1088 uint8_t sub_type =
1089 ol_txrx_tx_raw_subtype(tx_spec);
1090 htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
1091 htt_pkt_type_raw, sub_type);
1092 }
1093 }
1094 /*
1095 * If debug display is enabled, show the meta-data being
1096 * downloaded to the target via the HTT tx descriptor.
1097 */
1098 htt_tx_desc_display(tx_desc->htt_tx_desc);
1099 ol_tx_send(vdev->pdev, tx_desc, msdu);
1100 msdu = next;
1101 }
1102 return NULL; /* all MSDUs were accepted */
1103}
1104
1105#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1106#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
1107 do { \
1108 if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
1109 cdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
1110 ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
1111 if (tx_msdu_info.peer) { \
1112 /* remove the peer reference added above */ \
1113 ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
1114 } \
1115 goto MSDU_LOOP_BOTTOM; \
1116 } \
1117 } while (0)
1118#else
1119#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
1120#endif
1121
1122/* tx filtering is handled within the target FW */
1123#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */
1124
1125/**
1126 * parse_ocb_tx_header() - Function to check for OCB
1127 * TX control header on a packet and extract it if present
1128 *
 1129 * @msdu: Pointer to OS packet (cdf_nbuf_t)
 * @tx_ctrl: on return, filled with the TX control header if one is present
 *
 * Return: false if a TX control header is present but its version is
 *	invalid; true otherwise
 1130 */
1131#define OCB_HEADER_VERSION 1
1132bool parse_ocb_tx_header(cdf_nbuf_t msdu,
1133 struct ocb_tx_ctrl_hdr_t *tx_ctrl)
1134{
1135 struct ether_header *eth_hdr_p;
1136 struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;
1137
1138 /* Check if TX control header is present */
1139 eth_hdr_p = (struct ether_header *) cdf_nbuf_data(msdu);
1140 if (eth_hdr_p->ether_type != CDF_SWAP_U16(ETHERTYPE_OCB_TX))
 1141		/* TX control header is not present. Nothing to do. */
1142 return true;
1143
1144 /* Remove the ethernet header */
1145 cdf_nbuf_pull_head(msdu, sizeof(struct ether_header));
1146
1147 /* Parse the TX control header */
1148 tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *) cdf_nbuf_data(msdu);
1149
1150 if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
1151 if (tx_ctrl)
1152 cdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
1153 sizeof(*tx_ctrl_hdr));
1154 } else {
1155 /* The TX control header is invalid. */
1156 return false;
1157 }
1158
1159 /* Remove the TX control header */
1160 cdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
1161 return true;
1162}
1163
1164cdf_nbuf_t
1165ol_tx_non_std(ol_txrx_vdev_handle vdev,
1166 enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
1167{
1168 return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
1169}
1170
1171void
1172ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
1173 ol_txrx_data_tx_cb callback, void *ctxt)
1174{
1175 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1176 pdev->tx_data_callback.func = callback;
1177 pdev->tx_data_callback.ctxt = ctxt;
1178}
1179
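/**
 * ol_txrx_mgmt_tx_cb_set() - register tx completion callbacks for a
 *	management frame type
 * @pdev: physical device handle
 * @type: management frame type index
 * @download_cb: callback invoked when the frame download completes
 * @ota_ack_cb: callback invoked when the over-the-air ack is received
 * @ctxt: opaque context passed back to the callbacks
 */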
1180void
1181ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
1182 uint8_t type,
1183 ol_txrx_mgmt_tx_cb download_cb,
1184 ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
1185{
1186 TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
1187 pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
1188 pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
1189 pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
1190}
1191
1192#if defined(HELIUMPLUS_PADDR64)
1193void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
1194{
1195 uint32_t *frag_ptr_i_p;
1196 int i;
1197
1198 cdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
1199		  tx_desc, tx_desc->id);
1200	cdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%llx",
1201		  tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
1202	cdf_print("%s %d: Fragment Descriptor 0x%p (paddr=0x%llx)",
1203		  __func__, __LINE__, tx_desc->htt_frag_desc, tx_desc->htt_frag_desc_paddr);
1204
1205 /* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
1206 is already de-referrable (=> in virtual address space) */
1207 frag_ptr_i_p = tx_desc->htt_frag_desc;
1208
1209 /* Dump 6 words of TSO flags */
1210 print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags: ",
1211 DUMP_PREFIX_NONE, 8, 4,
1212 frag_ptr_i_p, 24, true);
1213
1214 frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */
1215
1216 i = 0;
1217 while (*frag_ptr_i_p) {
1218 print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr: ",
1219 DUMP_PREFIX_NONE, 8, 4,
1220 frag_ptr_i_p, 8, true);
1221 i++;
1222 if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
1223 break;
1224 else /* jump to next pointer - skip length */
1225 frag_ptr_i_p += 2;
1226 }
1227 return;
1228}
1229#endif /* HELIUMPLUS_PADDR64 */
1230
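/**
 * ol_txrx_mgmt_send() - send a management frame
 * @vdev: virtual device transmitting the frame
 * @tx_mgmt_frm: management frame to transmit
 * @type: management frame type index (selects the registered callbacks)
 * @use_6mbps: whether to force the 6 Mbps rate
 * @chanfreq: channel frequency to transmit on
 *
 * Return: 0 if the frame was accepted for transmission, -EINVAL if no
 *	tx descriptor could be allocated
 */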
1231int
1232ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
1233 cdf_nbuf_t tx_mgmt_frm,
1234 uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
1235{
1236 struct ol_txrx_pdev_t *pdev = vdev->pdev;
1237 struct ol_tx_desc_t *tx_desc;
1238 struct ol_txrx_msdu_info_t tx_msdu_info;
1239
1240 tx_msdu_info.tso_info.is_tso = 0;
1241
1242 tx_msdu_info.htt.action.use_6mbps = use_6mbps;
1243 tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
1244 tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
1245 tx_msdu_info.htt.action.do_tx_complete =
1246 pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;
1247
1248 /*
1249 * FIX THIS: l2_hdr_type should only specify L2 header type
1250 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
1251 * that is a combination of L2 header type and 802.11 frame type.
1252 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
1253 * But if the 802.11 frame type is "data", then the HTT pkt type is
1254 * the L2 header type (more or less): 802.3 vs. Native WiFi
1255 * (basic 802.11).
1256 * (Or the header type can be "raw", which is any version of the 802.11
1257 * header, and also implies that some of the offloaded tx data
1258 * processing steps may not apply.)
1259 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
1260 * l2_hdr_type field to program the HTT pkt type. Thus, this txrx SW
1261 * needs to overload the l2_hdr_type to indicate whether the frame is
1262 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
1263 * To fix this, the msdu_info's l2_hdr_type should be left specifying
1264 * just the L2 header type. For mgmt frames, there should be a
1265 * separate function to patch the HTT pkt type to store a "mgmt" value
1266 * rather than the L2 header type. Then the HTT pkt type can be
1267 * programmed efficiently for data frames, and the msdu_info's
1268 * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
1269 * frame type rather than the L2 header type.
1270 */
1271 /*
1272 * FIX THIS: remove duplication of htt_frm_type_mgmt and
1273 * htt_pkt_type_mgmt
1274 * The htt module expects a "enum htt_pkt_type" value.
1275 * The htt_dxe module expects a "enum htt_frm_type" value.
1276 * This needs to be cleaned up, so both versions of htt use a
1277 * consistent method of specifying the frame type.
1278 */
1279#ifdef QCA_SUPPORT_INTEGRATED_SOC
1280 /* tx mgmt frames always come with a 802.11 header */
1281 tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
1282 tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
1283#else
1284 tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
1285 tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
1286#endif
1287
1288 tx_msdu_info.peer = NULL;
1289
1290 cdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm, CDF_DMA_TO_DEVICE);
1291 /* For LL tx_comp_req is not used so initialized to 0 */
1292 tx_msdu_info.htt.action.tx_comp_req = 0;
1293 tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
1294 /* FIX THIS -
1295 * The FW currently has trouble using the host's fragments table
1296 * for management frames. Until this is fixed, rather than
1297 * specifying the fragment table to the FW, specify just the
1298 * address of the initial fragment.
1299 */
1300#if defined(HELIUMPLUS_PADDR64)
1301 /* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
1302 tx_desc); */
1303#endif /* defined(HELIUMPLUS_PADDR64) */
1304 if (tx_desc) {
1305 /*
1306 * Following the call to ol_tx_desc_ll, frag 0 is the
1307 * HTT tx HW descriptor, and the frame payload is in
1308 * frag 1.
1309 */
1310 htt_tx_desc_frags_table_set(
1311 pdev->htt_pdev,
1312 tx_desc->htt_tx_desc,
1313			cdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
1314			0, 0);
1315#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
1316 dump_frag_desc(
1317 "after htt_tx_desc_frags_table_set",
1318 tx_desc);
1319#endif /* defined(HELIUMPLUS_PADDR64) */
1320 }
1321 if (!tx_desc) {
1322 cdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
1323 CDF_DMA_TO_DEVICE);
1324 return -EINVAL; /* can't accept the tx mgmt frame */
1325 }
1326 TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
1327 TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
1328 tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
1329
1330 htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
1331	NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) = NBUF_TX_PKT_MGMT_TRACK;
	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
1332			  htt_pkt_type_mgmt);
1333
1334 return 0; /* accepted the tx mgmt frame */
1335}
1336
1337void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
1338{
1339 htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
1340}
1341
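/**
 * ol_tx_reinject() - re-send an MSDU, marking it as postponed
 * @vdev: virtual device handle
 * @msdu: frame to (re)transmit
 * @peer_id: peer ID to stamp into the HTT tx descriptor
 *
 * Return: NULL on success; the msdu if no tx descriptor could be allocated
 */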
1342cdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
1343 cdf_nbuf_t msdu, uint16_t peer_id)
1344{
1345 struct ol_tx_desc_t *tx_desc;
1346 struct ol_txrx_msdu_info_t msdu_info;
1347
1348 msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
1349 msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
1350 msdu_info.peer = NULL;
1351 msdu_info.htt.action.tx_comp_req = 0;
1352 msdu_info.tso_info.is_tso = 0;
1353
1354 ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
1355 HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
1356
1357 htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);
1358
1359 ol_tx_send(vdev->pdev, tx_desc, msdu);
1360
1361 return NULL;
1362}
1363
1364#if defined(FEATURE_TSO)
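/**
 * ol_tso_seg_list_init() - allocate the pdev's pool of TSO segment elements
 * @pdev: physical device handle
 * @num_seg: number of segment elements to pre-allocate
 */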
1365void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
1366{
1367 int i;
1368	struct cdf_tso_seg_elem_t *c_element;
1369
1370	c_element = cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
1371 pdev->tso_seg_pool.freelist = c_element;
1372 for (i = 0; i < (num_seg - 1); i++) {
1373 c_element->next =
1374 cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
1375 c_element = c_element->next;
1376 c_element->next = NULL;
1377	}
1378	pdev->tso_seg_pool.pool_size = num_seg;
1379	cdf_spinlock_init(&pdev->tso_seg_pool.tso_mutex);
1380}
1381
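/**
 * ol_tso_seg_list_deinit() - free the pdev's pool of TSO segment elements
 * @pdev: physical device handle
 */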
1382void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
1383{
1384	int i;
1385 struct cdf_tso_seg_elem_t *c_element;
1386 struct cdf_tso_seg_elem_t *temp;
1387
1388	cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
1389	c_element = pdev->tso_seg_pool.freelist;
1390 for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
1391 temp = c_element->next;
1392 cdf_mem_free(c_element);
1393 c_element = temp;
1394 if (!c_element)
1395 break;
1396	}
1397
1398 pdev->tso_seg_pool.freelist = NULL;
1399 pdev->tso_seg_pool.num_free = 0;
1400 pdev->tso_seg_pool.pool_size = 0;
1401 cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
1402 cdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
1403}
1404#endif /* FEATURE_TSO */