/*
 * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <cdf_nbuf.h>		/* cdf_nbuf_t, etc. */
#include <cdf_atomic.h>		/* cdf_atomic_read, etc. */
#include <cdf_util.h>		/* cdf_unlikely */

/* APIs for other modules */
#include <htt.h>		/* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>	/* htt_tx_desc_tid */
#include <ol_txrx_api.h>	/* ol_txrx_vdev_handle */
#include <ol_txrx_ctrl_api.h>	/* ol_txrx_sync */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT1 */
#include <ol_txrx_types.h>	/* pdev stats */
#include <ol_tx_desc.h>		/* ol_tx_desc */
#include <ol_tx_send.h>		/* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>	/* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>	/* OL_TX_ENCAP, etc */
#include <ol_tx.h>

#ifdef WLAN_FEATURE_FASTPATH
#include <hif.h>		/* HIF_DEVICE */
#include <htc_api.h>		/* Layering violation, but required for fast path */
#include <htt_internal.h>
#include <htt_types.h>		/* htc_endpoint */

int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
		 unsigned int num_msdus, unsigned int transfer_id);
#endif /* WLAN_FEATURE_FASTPATH */

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool. Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
	do { \
		struct ol_txrx_pdev_t *pdev = vdev->pdev; \
		(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type; \
		tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info); \
		if (cdf_unlikely(!tx_desc)) { \
			TXRX_STATS_MSDU_LIST_INCR( \
				pdev, tx.dropped.host_reject, msdu); \
			return msdu; /* the list of unaccepted MSDUs */ \
		} \
	} while (0)

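/*
 * ol_tx_prepare_tso() - if the netbuf is a TSO frame, allocate TSO segment
 * elements from the pdev's pool, chain them into msdu_info.tso_info, and
 * fill in the per-segment info; otherwise mark the frame as non-TSO with a
 * single segment.
 */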
#define ol_tx_prepare_tso(vdev, msdu, msdu_info) \
	do { \
		msdu_info.tso_info.curr_seg = NULL; \
		if (cdf_nbuf_is_tso(msdu)) { \
			int num_seg = cdf_nbuf_get_tso_num_seg(msdu); \
			msdu_info.tso_info.tso_seg_list = NULL; \
			msdu_info.tso_info.num_segs = num_seg; \
			while (num_seg) { \
				struct cdf_tso_seg_elem_t *tso_seg = \
					ol_tso_alloc_segment(vdev->pdev); \
				if (tso_seg) { \
					tso_seg->next = \
						msdu_info.tso_info.tso_seg_list; \
					msdu_info.tso_info.tso_seg_list \
						= tso_seg; \
					num_seg--; \
				} else { \
					cdf_print("TSO seg alloc failed!\n"); \
				} \
			} \
			cdf_nbuf_get_tso_info(vdev->pdev->osdev, \
					      msdu, &msdu_info.tso_info); \
			msdu_info.tso_info.curr_seg = \
				msdu_info.tso_info.tso_seg_list; \
			num_seg = msdu_info.tso_info.num_segs; \
		} else { \
			msdu_info.tso_info.is_tso = 0; \
			msdu_info.tso_info.num_segs = 1; \
		} \
	} while (0)

/**
 * ol_tx_send_data_frame() - send data frame
 * @sta_id: sta id
 * @skb: skb
 * @proto_type: proto type
 *
 * Return: NULL on success, skb on failure
 */
cdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, cdf_nbuf_t skb,
				 uint8_t proto_type)
{
	void *cdf_ctx = cds_get_context(CDF_MODULE_ID_CDF_DEVICE);
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_txrx_peer_t *peer;
	cdf_nbuf_t ret;
	CDF_STATUS status;

	if (cdf_unlikely(!pdev)) {
		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
			  "%s:pdev is null", __func__);
		return skb;
	}
	if (cdf_unlikely(!cdf_ctx)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s:cdf_ctx is null", __func__);
		return skb;
	}

	if (sta_id >= WLAN_MAX_STA_COUNT) {
		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
			  "%s:Invalid sta id", __func__);
		return skb;
	}

	peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
	if (!peer) {
		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
			  "%s:Invalid peer", __func__);
		return skb;
	}

	if (peer->state < ol_txrx_peer_state_conn) {
		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
			  "%s: station not yet registered, dropping pkt",
			  __func__);
		return skb;
	}

	status = cdf_nbuf_map_single(cdf_ctx, skb, CDF_DMA_TO_DEVICE);
	if (cdf_unlikely(status != CDF_STATUS_SUCCESS)) {
		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
			  "%s: nbuf map failed", __func__);
		return skb;
	}

	cdf_nbuf_trace_set_proto_type(skb, proto_type);

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
	    && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
	    && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	cdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL(peer->vdev, skb);
	if (ret) {
		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
			  "%s: Failed to tx", __func__);
		cdf_nbuf_unmap_single(cdf_ctx, ret, CDF_DMA_TO_DEVICE);
		return ret;
	}

	return NULL;
}

#ifdef IPA_OFFLOAD
/**
 * ol_tx_send_ipa_data_frame() - send IPA data frame
 * @vdev: vdev
 * @skb: skb
 *
 * Return: NULL on success, skb on failure
 */
cdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
				     cdf_nbuf_t skb)
{
	ol_txrx_pdev_handle pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	cdf_nbuf_t ret;

	if (cdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL", __func__);
		return skb;
	}

	if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
	    && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
	    && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
		cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);

	/* Terminate the (single-element) list of tx frames */
	cdf_nbuf_set_next(skb, NULL);
	ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
	if (ret) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "%s: Failed to tx", __func__);
		return ret;
	}

	return NULL;
}
#endif


#if defined(FEATURE_TSO)
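/**
 * ol_tx_ll() - transmit a list of MSDUs on a LL (low-latency) vdev
 * @vdev: virtual device handle
 * @msdu_list: NULL-terminated list of tx frames
 *
 * Each MSDU is segmented if it is a TSO frame, a tx descriptor is allocated
 * per segment, and the frames are handed to ol_tx_send().
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */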
cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
{
	cdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		cdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		ol_tx_prepare_tso(vdev, msdu, msdu_info);
		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = cdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				NBUF_MAPPED_PADDR_LO(msdu) = msdu_info.tso_info.
					curr_seg->seg.tso_frags[0].paddr_low_32;

			segments--;

			/*
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				cdf_nbuf_inc_users(msdu);

			ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);

			ol_tx_send(vdev->pdev, tx_desc, msdu);

			if (msdu_info.tso_info.curr_seg) {
				msdu_info.tso_info.curr_seg =
					msdu_info.tso_info.curr_seg->next;
			}

			cdf_nbuf_dec_num_frags(msdu);

			if (msdu_info.tso_info.is_tso) {
				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else /* TSO */

cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
{
	cdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		cdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = cdf_nbuf_next(msdu);
		ol_tx_send(vdev->pdev, tx_desc, msdu);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}
#endif /* TSO */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_prepare_ll_fast() - allocate and prepare a Tx descriptor
 *
 * Allocate and prepare a Tx descriptor with msdu and fragment descriptor
 * information.
 *
 * @pdev: pointer to ol pdev handle
 * @vdev: pointer to ol vdev handle
 * @msdu: linked list of msdu packets
 * @pkt_download_len: packet download length
 * @ep_id: endpoint ID
 * @msdu_info: Handle to msdu_info
 *
 * Return: Pointer to Tx descriptor
 */
static inline struct ol_tx_desc_t *
ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
		      ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu,
		      uint32_t pkt_download_len, uint32_t ep_id,
		      struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	uint32_t *htt_tx_desc;
	void *htc_hdr_vaddr;
	u_int32_t num_frags, i;

	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (cdf_unlikely(!tx_desc))
		return NULL;

	tx_desc->netbuf = msdu;
	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = ol_tx_frm_tso;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
	} else {
		tx_desc->pkt_type = ol_tx_frm_std;
	}

	htt_tx_desc = tx_desc->htt_tx_desc;

	/* Make sure frags num is set to 0 */
	/*
	 * Do this here rather than in hardstart, so
	 * that we can hopefully take only one cache-miss while
	 * accessing skb->cb.
	 */

	/* HTT Header */
	/* TODO : Take care of multiple fragments */

	/* TODO: Precompute and store paddr in ol_tx_desc_t */
	/* Virtual address of the HTT/HTC header, added by driver */
	htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
	htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
			 &msdu_info->htt, &msdu_info->tso_info,
			 NULL, vdev->opmode == wlan_op_mode_ocb);

	num_frags = cdf_nbuf_get_num_frags(msdu);
	/* num_frags is expected to be at most 2 */
	num_frags = (num_frags > CVG_NBUF_MAX_EXTRA_FRAGS) ?
		CVG_NBUF_MAX_EXTRA_FRAGS : num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* ! defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			 msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			cdf_size_t frag_len;
			u_int32_t frag_paddr;

			frag_len = cdf_nbuf_get_frag_len(msdu, i);
			frag_paddr = cdf_nbuf_get_frag_paddr_lo(msdu, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
					 i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			cdf_print("%s:%d: htt_fdesc=%p frag_paddr=%u len=%zu\n",
				  __func__, __LINE__, tx_desc->htt_frag_desc,
				  frag_paddr, frag_len);
			dump_pkt(msdu, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* ! defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
					 i - 1, frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

	/*
	 * Do we want to turn on the word_stream bit-map here? For Linux,
	 * non-TSO, this is not required. We still have to mark the swap bit
	 * correctly when posting to the ring.
	 */
	/* Check to make sure the data download length is correct */

	/*
	 * TODO : Can we remove this check and always download a fixed length?
	 */
	if (cdf_unlikely(cdf_nbuf_len(msdu) < pkt_download_len))
		pkt_download_len = cdf_nbuf_len(msdu);

	/* Fill the HTC header information */
	/*
	 * Passing 0 as the seq_no field, we can probably get away
	 * with it for the time being, since this is not checked in f/w
	 */
	/* TODO : Prefill this, look at multi-fragment case */
	HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);

	return tx_desc;
}
#if defined(FEATURE_TSO)
/**
 * ol_tx_ll_fast() - update metadata information and send msdu to HIF/CE
 *
 * @vdev: handle to ol_txrx_vdev_t
 * @msdu_list: msdu list to be sent out.
 *
 * Return: NULL on success, pointer to nbuf when it fails to send.
 */
cdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
{
	cdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		cdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;
		int segments = 1;

		msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		ol_tx_prepare_tso(vdev, msdu, msdu_info);
		segments = msdu_info.tso_info.num_segs;

		/*
		 * The netbuf may get linked into a different list
		 * inside the ce_send_fast function, so store the next
		 * pointer before the ce_send call.
		 */
		next = cdf_nbuf_next(msdu);
		/* init the current segment to the 1st segment in the list */
		while (segments) {

			if (msdu_info.tso_info.curr_seg)
				NBUF_MAPPED_PADDR_LO(msdu) = msdu_info.tso_info.
					curr_seg->seg.tso_frags[0].paddr_low_32;

			segments--;

			/*
			 * if this is a jumbo nbuf, then increment the number
			 * of nbuf users for each additional segment of the msdu.
			 * This will ensure that the skb is freed only after
			 * receiving tx completion for all segments of an nbuf
			 */
			if (segments)
				cdf_nbuf_inc_users(msdu);

			msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
			msdu_info.htt.info.vdev_id = vdev->vdev_id;
			msdu_info.htt.action.cksum_offload =
				cdf_nbuf_get_tx_cksum(msdu);
			switch (cdf_nbuf_get_exemption_type(msdu)) {
			case CDF_NBUF_EXEMPT_NO_EXEMPTION:
			case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
				/* We want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 1;
				break;
			case CDF_NBUF_EXEMPT_ALWAYS:
				/* We don't want to encrypt this frame */
				msdu_info.htt.action.do_encrypt = 0;
				break;
			default:
				msdu_info.htt.action.do_encrypt = 1;
				cdf_assert(0);
				break;
			}

			tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
						pkt_download_len, ep_id,
						&msdu_info);

			if (cdf_likely(tx_desc)) {
				/*
				 * If debug display is enabled, show the meta
				 * data being downloaded to the target via the
				 * HTT tx descriptor.
				 */
				htt_tx_desc_display(tx_desc->htt_tx_desc);
				if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu,
						       1, ep_id))) {
					/*
					 * The packet could not be sent.
					 * Free the descriptor, return the
					 * packet to the caller.
					 */
					ol_tx_desc_free(pdev, tx_desc);
					return msdu;
				}
				if (msdu_info.tso_info.curr_seg) {
					msdu_info.tso_info.curr_seg =
						msdu_info.tso_info.curr_seg->next;
				}

				if (msdu_info.tso_info.is_tso) {
					cdf_nbuf_dec_num_frags(msdu);
					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
				}
			} else {
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				/* the list of unaccepted MSDUs */
				return msdu;
			}
		} /* while segments */

		msdu = next;
		if (msdu_info.tso_info.is_tso) {
			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
		}
	} /* while msdus */
	return NULL; /* all MSDUs were accepted */
}
#else
cdf_nbuf_t
ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
{
	cdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	uint32_t pkt_download_len =
		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;
	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		cdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;

		msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
		msdu_info.htt.info.vdev_id = vdev->vdev_id;
		msdu_info.htt.action.cksum_offload =
			cdf_nbuf_get_tx_cksum(msdu);
		switch (cdf_nbuf_get_exemption_type(msdu)) {
		case CDF_NBUF_EXEMPT_NO_EXEMPTION:
		case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
			/* We want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 1;
			break;
		case CDF_NBUF_EXEMPT_ALWAYS:
			/* We don't want to encrypt this frame */
			msdu_info.htt.action.do_encrypt = 0;
			break;
		default:
			msdu_info.htt.action.do_encrypt = 1;
			cdf_assert(0);
			break;
		}

		tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
					pkt_download_len, ep_id,
					&msdu_info);

		if (cdf_likely(tx_desc)) {
			/*
			 * If debug display is enabled, show the meta-data being
			 * downloaded to the target via the HTT tx descriptor.
			 */
			htt_tx_desc_display(tx_desc->htt_tx_desc);
			/*
			 * The netbuf may get linked into a different list
			 * inside the ce_send_fast function, so store the next
			 * pointer before the ce_send call.
			 */
			next = cdf_nbuf_next(msdu);
			if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu, 1,
					       ep_id))) {
				/* The packet could not be sent */
				/* Free the descriptor, return the packet to the
				 * caller */
				ol_tx_desc_free(pdev, tx_desc);
				return msdu;
			}
			msdu = next;
		} else {
			TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}
	}

	return NULL; /* all MSDUs were accepted */
}
#endif /* FEATURE_TSO */
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ol_tx_ll_wrapper() - wrapper that calls ol_tx_ll_fast() when fastpath is
 * enabled, and ol_tx_ll() otherwise
 */
static inline cdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
{
	struct ol_softc *hif_device =
		(struct ol_softc *)cds_get_context(CDF_MODULE_ID_HIF);

	if (cdf_likely(hif_device && hif_device->fastpath_mode_on))
		msdu_list = ol_tx_ll_fast(vdev, msdu_list);
	else
		msdu_list = ol_tx_ll(vdev, msdu_list);

	return msdu_list;
}
#else
static inline cdf_nbuf_t
ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
{
	return ol_tx_ll(vdev, msdu_list);
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL

#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
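/**
 * ol_tx_vdev_ll_pause_queue_send_base() - drain a vdev's pause queue
 * @vdev: vdev whose queued tx frames should be sent
 *
 * Send as many frames from the vdev's ll_pause tx queue as the available
 * tx descriptors allow (leaving OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN free),
 * and restart the flush timer if a backlog remains.
 */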
static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
{
	int max_to_accept;

	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason) {
		cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		return;
	}

	/*
	 * Send as much of the backlog as possible, but leave some margin
	 * of unallocated tx descriptors that can be used for new frames
	 * being transmitted by other vdevs.
	 * Ideally there would be a scheduler, which would not only leave
	 * some margin for new frames for other vdevs, but also would
	 * fairly apportion the tx descriptors between multiple vdevs that
	 * have backlogs in their pause queues.
	 * However, the fairness benefit of having a scheduler for frames
	 * from multiple vdev's pause queues is not sufficient to outweigh
	 * the extra complexity.
	 */
	max_to_accept = vdev->pdev->tx_desc.num_free -
		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
		cdf_nbuf_t tx_msdu;
		max_to_accept--;
		vdev->ll_pause.txq.depth--;
		tx_msdu = vdev->ll_pause.txq.head;
		if (tx_msdu) {
			vdev->ll_pause.txq.head = cdf_nbuf_next(tx_msdu);
			if (NULL == vdev->ll_pause.txq.head)
				vdev->ll_pause.txq.tail = NULL;
			cdf_nbuf_set_next(tx_msdu, NULL);
			NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
						 NBUF_TX_PKT_TXRX_DEQUEUE);
			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
			/*
			 * It is unexpected that ol_tx_ll would reject the frame
			 * since we checked that there's room for it, though
			 * there's an infinitesimal possibility that between the
			 * time we checked the room available and now, a
			 * concurrent batch of tx frames used up all the room.
			 * For simplicity, just drop the frame.
			 */
			if (tx_msdu) {
				cdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
					       CDF_DMA_TO_DEVICE);
				cdf_nbuf_tx_free(tx_msdu, NBUF_PKT_ERROR);
			}
		}
	}
	if (vdev->ll_pause.txq.depth) {
		cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
		cdf_softirq_timer_start(&vdev->ll_pause.timer,
					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
			vdev->ll_pause.q_overflow_cnt++;
	}

	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

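/**
 * ol_tx_vdev_pause_queue_append() - queue tx frames while the vdev is paused
 * @vdev: vdev that is paused or throttled
 * @msdu_list: list of tx frames to enqueue
 * @start_timer: whether to (re)start the pause-queue flush timer
 *
 * Return: NULL if all frames were queued, else the frames that did not fit
 *	   within the max queue depth
 */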
static cdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
			      cdf_nbuf_t msdu_list, uint8_t start_timer)
{
	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
	while (msdu_list &&
	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
		cdf_nbuf_t next = cdf_nbuf_next(msdu_list);
		NBUF_UPDATE_TX_PKT_COUNT(msdu_list, NBUF_TX_PKT_TXRX_ENQUEUE);
		DPTRACE(cdf_dp_trace(msdu_list,
				CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
				(uint8_t *)(cdf_nbuf_data(msdu_list)),
				sizeof(cdf_nbuf_data(msdu_list))));

		vdev->ll_pause.txq.depth++;
		if (!vdev->ll_pause.txq.head) {
			vdev->ll_pause.txq.head = msdu_list;
			vdev->ll_pause.txq.tail = msdu_list;
		} else {
			cdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
		}
		vdev->ll_pause.txq.tail = msdu_list;

		msdu_list = next;
	}
	if (vdev->ll_pause.txq.tail)
		cdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

	if (start_timer) {
		cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
		cdf_softirq_timer_start(&vdev->ll_pause.timer,
					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
	}
	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return msdu_list;
}

/*
 * Store up the tx frame in the vdev's tx queue if the vdev is paused.
 * If there are too many frames in the tx queue, reject them.
 */
cdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
{
	uint16_t eth_type;
	uint32_t paused_reason;

	if (msdu_list == NULL)
		return NULL;

	paused_reason = vdev->ll_pause.paused_reason;
	if (paused_reason) {
		if (cdf_unlikely((paused_reason &
				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
				 paused_reason)) {
			eth_type = (((struct ethernet_hdr_t *)
				     cdf_nbuf_data(msdu_list))->
				    ethertype[0] << 8) |
				   (((struct ethernet_hdr_t *)
				     cdf_nbuf_data(msdu_list))->ethertype[1]);
			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
				return msdu_list;
			}
		}
		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
	} else {
		if (vdev->ll_pause.txq.depth > 0 ||
		    vdev->pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			/* not paused, but there is a backlog of frames
			   from a prior pause or throttle-off phase */
			msdu_list = ol_tx_vdev_pause_queue_append(
				vdev, msdu_list, 0);
			/* if throttle is disabled or phase is "on",
			   send the frame */
			if (vdev->pdev->tx_throttle.current_throttle_level ==
			    THROTTLE_LEVEL_0 ||
			    vdev->pdev->tx_throttle.current_throttle_phase ==
			    THROTTLE_PHASE_ON) {
				/* send as many frames as possible
				   from the vdev's backlog */
				ol_tx_vdev_ll_pause_queue_send_base(vdev);
			}
		} else {
			/* not paused, no throttle and no backlog -
			   send the new frames */
			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
		}
	}
	return msdu_list;
}

/*
 * Run through the transmit queues for all the vdevs and
 * send the pending frames
 */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
	int max_to_send;	/* limit on the number of frames to send */
	cdf_nbuf_t tx_msdu;
	struct ol_txrx_vdev_t *vdev = NULL;
	uint8_t more;

	if (NULL == pdev)
		return;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;

	/* ensure that we send no more than tx_threshold frames at once */
	max_to_send = pdev->tx_throttle.tx_threshold;

	/* round robin through the vdev queues for the given pdev */

	/* Potential improvement: download several frames from the same vdev
	   at a time, since it is more likely that those frames could be
	   aggregated together; remember which vdev was serviced last,
	   so the next call to this function can resume the round-robin
	   traversing where the current invocation left off */
	do {
		more = 0;
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			cdf_spin_lock_bh(&vdev->ll_pause.mutex);
			if (vdev->ll_pause.txq.depth) {
				if (vdev->ll_pause.paused_reason) {
					cdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				tx_msdu = vdev->ll_pause.txq.head;
				if (NULL == tx_msdu) {
					cdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				max_to_send--;
				vdev->ll_pause.txq.depth--;

				vdev->ll_pause.txq.head =
					cdf_nbuf_next(tx_msdu);

				if (NULL == vdev->ll_pause.txq.head)
					vdev->ll_pause.txq.tail = NULL;

				cdf_nbuf_set_next(tx_msdu, NULL);
				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
				/*
				 * It is unexpected that ol_tx_ll would reject
				 * the frame, since we checked that there's
				 * room for it, though there's an infinitesimal
				 * possibility that between the time we checked
				 * the room available and now, a concurrent
				 * batch of tx frames used up all the room.
				 * For simplicity, just drop the frame.
				 */
				if (tx_msdu) {
					cdf_nbuf_unmap(pdev->osdev, tx_msdu,
						       CDF_DMA_TO_DEVICE);
					cdf_nbuf_tx_free(tx_msdu,
							 NBUF_PKT_ERROR);
				}
			}
			/* check if there are more msdus to transmit */
			if (vdev->ll_pause.txq.depth)
				more = 1;
			cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} while (more && max_to_send);

	vdev = NULL;
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		cdf_spin_lock_bh(&vdev->ll_pause.mutex);
		if (vdev->ll_pause.txq.depth) {
			cdf_softirq_timer_cancel(&pdev->tx_throttle.tx_timer);
			cdf_softirq_timer_start(
				&pdev->tx_throttle.tx_timer,
				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
			cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			return;
		}
		cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
}

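/**
 * ol_tx_vdev_ll_pause_queue_send() - flush a vdev's pause queue
 * @context: the vdev (struct ol_txrx_vdev_t) whose queue should be flushed
 *
 * Typically invoked from the ll_pause flush timer; skips the flush while
 * tx throttling is active and in its "off" phase.
 */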
void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
	    pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;
	ol_tx_vdev_ll_pause_queue_send_base(vdev);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

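/**
 * ol_txrx_tx_is_raw() - check if a tx spec requires raw (802.11) handling
 * @tx_spec: tx spec flags provided by the caller
 *
 * Return: non-zero if the frame must be sent as a raw 802.11 frame
 */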
static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
{
	return
		tx_spec &
		(ol_tx_spec_raw | ol_tx_spec_no_aggr | ol_tx_spec_no_encrypt);
}

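/**
 * ol_txrx_tx_raw_subtype() - convert tx spec flags into the HTT raw subtype
 * @tx_spec: tx spec flags provided by the caller
 *
 * Return: HTT tx descriptor subtype bitmap for raw frames
 */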
static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
{
	uint8_t sub_type = 0x1; /* 802.11 MAC header present */

	if (tx_spec & ol_tx_spec_no_aggr)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
	if (tx_spec & ol_tx_spec_no_encrypt)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	if (tx_spec & ol_tx_spec_nwifi_no_encrypt)
		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
	return sub_type;
}

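/**
 * ol_tx_non_std_ll() - send a list of non-standard tx frames (LL path)
 * @vdev: virtual device handle
 * @tx_spec: what non-standard handling the frames need (raw, no-encrypt, ...)
 * @msdu_list: NULL-terminated list of tx frames
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */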
cdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
		 enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
{
	cdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		cdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = cdf_nbuf_next(msdu);

		if (tx_spec != ol_tx_spec_std) {
			if (tx_spec & ol_tx_spec_no_free) {
				tx_desc->pkt_type = ol_tx_frm_no_free;
			} else if (tx_spec & ol_tx_spec_tso) {
				tx_desc->pkt_type = ol_tx_frm_tso;
			} else if (tx_spec & ol_tx_spec_nwifi_no_encrypt) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu);
		msdu = next;
	}
	return NULL; /* all MSDUs were accepted */
}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
	do { \
		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
			cdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
			if (tx_msdu_info.peer) { \
				/* remove the peer reference added above */ \
				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
			} \
			goto MSDU_LOOP_BOTTOM; \
		} \
	} while (0)
#else
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
#endif

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */

/**
 * parse_ocb_tx_header() - check for an OCB TX control header on a packet
 * and extract (then remove) it if present
 *
 * @msdu: Pointer to OS packet (cdf_nbuf_t)
 * @tx_ctrl: pointer to copy the control header into (may be NULL)
 *
 * Return: true if there was no control header or it was parsed successfully,
 *	   false if the control header version is not recognized
 */
#define OCB_HEADER_VERSION 1
bool parse_ocb_tx_header(cdf_nbuf_t msdu,
			 struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	struct ether_header *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if TX control header is present */
	eth_hdr_p = (struct ether_header *)cdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != CDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do. */
		return true;

	/* Remove the ethernet header */
	cdf_nbuf_pull_head(msdu, sizeof(struct ether_header));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)cdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			cdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		return false;
	}

	/* Remove the TX control header */
	cdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}

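/**
 * ol_tx_non_std() - dispatch non-standard tx frames to the LL handler
 * @vdev: virtual device handle
 * @tx_spec: what non-standard handling the frames need
 * @msdu_list: NULL-terminated list of tx frames
 *
 * Return: NULL if all MSDUs were accepted, else the list of rejected MSDUs
 */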
cdf_nbuf_t
ol_tx_non_std(ol_txrx_vdev_handle vdev,
	      enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
{
	return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
}

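/**
 * ol_txrx_data_tx_cb_set() - register the data tx completion callback
 * @vdev: which vdev's pdev the callback applies to
 * @callback: function to call when a data tx frame completes
 * @ctxt: opaque context passed back to the callback
 */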
void
ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
		       ol_txrx_data_tx_cb callback, void *ctxt)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	pdev->tx_data_callback.func = callback;
	pdev->tx_data_callback.ctxt = ctxt;
}

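/**
 * ol_txrx_mgmt_tx_cb_set() - register tx callbacks for a management frame type
 * @pdev: physical device handle
 * @type: which category of management frame the callbacks apply to
 * @download_cb: called when the frame has been downloaded to the target
 * @ota_ack_cb: called when the over-the-air tx completion is reported
 * @ctxt: opaque context passed back to the callbacks
 */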
void
ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
		       uint8_t type,
		       ol_txrx_mgmt_tx_cb download_cb,
		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
	pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
	pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
}

#if defined(HELIUMPLUS_PADDR64)
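/**
 * dump_frag_desc() - dump the HTT fragment descriptor of a tx descriptor
 * @msg: label from the caller describing the dump context
 * @tx_desc: tx descriptor whose fragment descriptor should be dumped
 */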
void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
{
	uint32_t *frag_ptr_i_p;
	int i;

	cdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
		  tx_desc, tx_desc->id);
	cdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%x\n",
		  tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
	cdf_print("%s %d: Fragment Descriptor 0x%p\n",
		  __func__, __LINE__, tx_desc->htt_frag_desc);

	/* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
	   is already dereferenceable (=> in virtual address space) */
	frag_ptr_i_p = tx_desc->htt_frag_desc;

	/* Dump 6 words of TSO flags */
	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags:  ",
		       DUMP_PREFIX_NONE, 8, 4,
		       frag_ptr_i_p, 24, true);

	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */

	i = 0;
	while (*frag_ptr_i_p) {
		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr:  ",
			       DUMP_PREFIX_NONE, 8, 4,
			       frag_ptr_i_p, 8, true);
		i++;
		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
			break;
		else /* jump to next pointer - skip length */
			frag_ptr_i_p += 2;
	}
	return;
}
#endif /* HELIUMPLUS_PADDR64 */

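/**
 * ol_txrx_mgmt_send() - send a management frame
 * @vdev: virtual device transmitting the frame
 * @tx_mgmt_frm: management frame to transmit
 * @type: management frame type index (selects the registered callbacks)
 * @use_6mbps: specify whether to use the 6 Mbps rate
 * @chanfreq: channel frequency (MHz) to transmit the frame on
 *
 * Return: 0 if the frame was accepted for transmission, -EINVAL otherwise
 */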
int
ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
		  cdf_nbuf_t tx_mgmt_frm,
		  uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_msdu_info_t tx_msdu_info;

	tx_msdu_info.tso_info.is_tso = 0;

	tx_msdu_info.htt.action.use_6mbps = use_6mbps;
	tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
	tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
	tx_msdu_info.htt.action.do_tx_complete =
		pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;

	/*
	 * FIX THIS: l2_hdr_type should only specify L2 header type
	 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
	 * that is a combination of L2 header type and 802.11 frame type.
	 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
	 * But if the 802.11 frame type is "data", then the HTT pkt type is
	 * the L2 header type (more or less): 802.3 vs. Native WiFi
	 * (basic 802.11).
	 * (Or the header type can be "raw", which is any version of the 802.11
	 * header, and also implies that some of the offloaded tx data
	 * processing steps may not apply.)
	 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
	 * l2_hdr_type field to program the HTT pkt type. Thus, this txrx SW
	 * needs to overload the l2_hdr_type to indicate whether the frame is
	 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
	 * To fix this, the msdu_info's l2_hdr_type should be left specifying
	 * just the L2 header type. For mgmt frames, there should be a
	 * separate function to patch the HTT pkt type to store a "mgmt" value
	 * rather than the L2 header type. Then the HTT pkt type can be
	 * programmed efficiently for data frames, and the msdu_info's
	 * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
	 * frame type rather than the L2 header type.
	 */
	/*
	 * FIX THIS: remove duplication of htt_frm_type_mgmt and
	 * htt_pkt_type_mgmt
	 * The htt module expects a "enum htt_pkt_type" value.
	 * The htt_dxe module expects a "enum htt_frm_type" value.
	 * This needs to be cleaned up, so both versions of htt use a
	 * consistent method of specifying the frame type.
	 */
#ifdef QCA_SUPPORT_INTEGRATED_SOC
	/* tx mgmt frames always come with a 802.11 header */
	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
	tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
#else
	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
	tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
#endif

	tx_msdu_info.peer = NULL;

	cdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm, CDF_DMA_TO_DEVICE);
	/* For LL tx_comp_req is not used so initialized to 0 */
	tx_msdu_info.htt.action.tx_comp_req = 0;
	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
	/* FIX THIS -
	 * The FW currently has trouble using the host's fragments table
	 * for management frames. Until this is fixed, rather than
	 * specifying the fragment table to the FW, specify just the
	 * address of the initial fragment.
	 */
#if defined(HELIUMPLUS_PADDR64)
	/* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
	   tx_desc); */
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (tx_desc) {
		/*
		 * Following the call to ol_tx_desc_ll, frag 0 is the
		 * HTT tx HW descriptor, and the frame payload is in
		 * frag 1.
		 */
		htt_tx_desc_frags_table_set(
			pdev->htt_pdev,
			tx_desc->htt_tx_desc,
			cdf_nbuf_get_frag_paddr_lo(tx_mgmt_frm, 1),
			0, 0);
#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
		dump_frag_desc(
			"after htt_tx_desc_frags_table_set",
			tx_desc);
#endif /* defined(HELIUMPLUS_PADDR64) */
	}
	if (!tx_desc) {
		cdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
				      CDF_DMA_TO_DEVICE);
		return -EINVAL; /* can't accept the tx mgmt frame */
	}
	TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;

	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
	NBUF_SET_PACKET_TRACK(tx_desc->netbuf, NBUF_TX_PKT_MGMT_TRACK);
	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
			  htt_pkt_type_mgmt);

	return 0; /* accepted the tx mgmt frame */
}

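/**
 * ol_txrx_sync() - send an HTT host-to-target sync message
 * @pdev: physical device handle
 * @sync_cnt: sync count value placed in the sync message
 */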
void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
{
	htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
}

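/**
 * ol_tx_reinject() - re-send a tx frame on behalf of a specific peer
 * @vdev: virtual device handle
 * @msdu: frame to (re)transmit
 * @peer_id: ID of the peer the frame is destined to
 *
 * Return: NULL if the frame was accepted, else the unaccepted frame
 */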
cdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
			  cdf_nbuf_t msdu, uint16_t peer_id)
{
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
	msdu_info.peer = NULL;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;

	ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);

	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);

	ol_tx_send(vdev->pdev, tx_desc, msdu);

	return NULL;
}

#if defined(FEATURE_TSO)
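/**
 * ol_tso_seg_list_init() - allocate the pdev's pool of TSO segment elements
 * @pdev: physical device that owns the pool
 * @num_seg: number of TSO segment elements to pre-allocate
 */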
void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
{
	int i;
	struct cdf_tso_seg_elem_t *c_element;

	c_element = cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
	pdev->tso_seg_pool.freelist = c_element;
	for (i = 0; i < (num_seg - 1); i++) {
		c_element->next =
			cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
		c_element = c_element->next;
		c_element->next = NULL;
	}
	pdev->tso_seg_pool.pool_size = num_seg;
	cdf_spinlock_init(&pdev->tso_seg_pool.tso_mutex);
}

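/**
 * ol_tso_seg_list_deinit() - free the pdev's pool of TSO segment elements
 * @pdev: physical device that owns the pool
 */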
void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
	int i;
	struct cdf_tso_seg_elem_t *c_element;
	struct cdf_tso_seg_elem_t *temp;

	cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	c_element = pdev->tso_seg_pool.freelist;
	for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
		temp = c_element->next;
		cdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	pdev->tso_seg_pool.freelist = NULL;
	pdev->tso_seg_pool.num_free = 0;
	pdev->tso_seg_pool.pool_size = 0;
	cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
	cdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
}
#endif /* FEATURE_TSO */