/*
2 * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * @file htt_tx.c
30 * @brief Implement transmit aspects of HTT.
31 * @details
32 * This file contains three categories of HTT tx code:
33 * 1. An abstraction of the tx descriptor, to hide the
34 * differences between the HL vs. LL tx descriptor.
35 * 2. Functions for allocating and freeing HTT tx descriptors.
36 * 3. The function that accepts a tx frame from txrx and sends the
37 * tx frame to HTC.
38 */
39#include <osdep.h> /* uint32_t, offsetof, etc. */
40#include <cdf_types.h> /* cdf_dma_addr_t */
41#include <cdf_memory.h> /* cdf_os_mem_alloc_consistent et al */
42#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
43#include <cdf_time.h> /* cdf_mdelay */
44
45#include <htt.h> /* htt_tx_msdu_desc_t */
46#include <htc.h> /* HTC_HDR_LENGTH */
47#include <htc_api.h> /* htc_flush_surprise_remove */
48#include <ol_cfg.h> /* ol_cfg_netbuf_frags_max, etc. */
49#include <ol_htt_tx_api.h> /* HTT_TX_DESC_VADDR_OFFSET */
50#include <ol_txrx_htt_api.h> /* ol_tx_msdu_id_storage */
51#include <htt_internal.h>
52
53/* IPA Micro controler TX data packet HTT Header Preset */
54/* 31 | 30 29 | 28 | 27 | 26 22 | 21 16 | 15 13 | 12 8 | 7 0
55 *----------------------------------------------------------------------------
56 * R | CS OL | R | PP | ext TID | vdev ID | pkt type | pkt subtyp | msg type
57 * 0 | 0 | 0 | | 0x1F | 0 | 2 | 0 | 0x01
58 ***----------------------------------------------------------------------------
59 * pkt ID | pkt length
60 ***----------------------------------------------------------------------------
61 * frag_desc_ptr
62 ***----------------------------------------------------------------------------
63 * peer_id
64 ***----------------------------------------------------------------------------
65 */
66#define HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT 0x07C04001
67
68/*--- setup / tear-down functions -------------------------------------------*/
69
70#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
71uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
72#endif
73
74int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
75{
76 int i, pool_size;
77 uint32_t **p;
78 cdf_dma_addr_t pool_paddr;
79
80#if defined(HELIUMPLUS_PADDR64)
81 pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
82
83 if (HTT_WIFI_IP_VERSION(pdev->wifi_ip_ver.major, 0x2)) {
84 /*
85 * sizeof MSDU_EXT/Fragmentation descriptor.
86 */
87 pdev->frag_descs.size = sizeof(struct msdu_ext_desc_t);
88 } else {
89 /*
90 * Add the fragmentation descriptor elements.
91 * Add the most that the OS may deliver, plus one more
92 * in case the txrx code adds a prefix fragment (for
93 * TSO or audio interworking SNAP header)
94 */
95 pdev->frag_descs.size =
96 (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev)+1) * 8
97 + 4;
98 }
99#else /* ! defined(HELIUMPLUS_PADDR64) */
100 /*
101 * Start with the size of the base struct
102 * that actually gets downloaded.
103 *
104 * Add the fragmentation descriptor elements.
105 * Add the most that the OS may deliver, plus one more
106 * in case the txrx code adds a prefix fragment (for
107 * TSO or audio interworking SNAP header)
108 */
109 pdev->tx_descs.size =
110 sizeof(struct htt_host_tx_desc_t)
111 + (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev) + 1) * 8
112 /* 2x uint32_t */
113 + 4; /* uint32_t fragmentation list terminator */
114
115 if (pdev->tx_descs.size < sizeof(uint32_t *))
116 pdev->tx_descs.size = sizeof(uint32_t *);
117#endif /* defined(HELIUMPLUS_PADDR64) */
118 /*
119 * Make sure tx_descs.size is a multiple of 4-bytes.
120 * It should be, but round up just to be sure.
121 */
122 pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
123
124 pdev->tx_descs.pool_elems = desc_pool_elems;
125 pdev->tx_descs.alloc_cnt = 0;
126
127 pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
128
129 pdev->tx_descs.pool_vaddr =
130 cdf_os_mem_alloc_consistent(
131 pdev->osdev, pool_size,
132 &pool_paddr,
133 cdf_get_dma_mem_context((&pdev->tx_descs), memctx));
134
135 pdev->tx_descs.pool_paddr = pool_paddr;
136
137 if (!pdev->tx_descs.pool_vaddr)
138 return -ENOBUFS; /* failure */
139
140 cdf_print("%s:htt_desc_start:0x%p htt_desc_end:0x%p\n", __func__,
141 pdev->tx_descs.pool_vaddr,
142 (uint32_t *) (pdev->tx_descs.pool_vaddr + pool_size));
143
144#if defined(HELIUMPLUS_PADDR64)
145 pdev->frag_descs.pool_elems = desc_pool_elems;
146 /*
147 * Allocate space for MSDU extension descriptor
148 * H/W expects this in contiguous memory
149 */
150 pool_size = pdev->frag_descs.pool_elems * pdev->frag_descs.size;
151
152 pdev->frag_descs.pool_vaddr = cdf_os_mem_alloc_consistent(
153 pdev->osdev, pool_size, &pool_paddr,
154 cdf_get_dma_mem_context((&pdev->frag_descs), memctx));
155
156 if (!pdev->frag_descs.pool_vaddr)
157 return -ENOBUFS; /* failure */
158
159 pdev->frag_descs.pool_paddr = pool_paddr;
160
161 cdf_print("%s:MSDU Ext.Table Start:0x%p MSDU Ext.Table End:0x%p\n",
162 __func__, pdev->frag_descs.pool_vaddr,
163 (u_int32_t *) (pdev->frag_descs.pool_vaddr + pool_size));
164#endif /* defined(HELIUMPLUS_PADDR64) */
165
166#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
167 g_dbg_htt_desc_end_addr = (uint32_t *)
168 (pdev->tx_descs.pool_vaddr + pool_size);
169 g_dbg_htt_desc_start_addr = (uint32_t *) pdev->tx_descs.pool_vaddr;
170#endif
171
172 /* link tx descriptors into a freelist */
173 pdev->tx_descs.freelist = (uint32_t *) pdev->tx_descs.pool_vaddr;
174 p = (uint32_t **) pdev->tx_descs.freelist;
175 for (i = 0; i < desc_pool_elems - 1; i++) {
176 *p = (uint32_t *) (((char *)p) + pdev->tx_descs.size);
177 p = (uint32_t **) *p;
178 }
179 *p = NULL;
180
181 return 0; /* success */
182}
183
184void htt_tx_detach(struct htt_pdev_t *pdev)
185{
186 if (pdev) {
187 cdf_os_mem_free_consistent(
188 pdev->osdev,
189 /* pool_size */
190 pdev->tx_descs.pool_elems * pdev->tx_descs.size,
191 pdev->tx_descs.pool_vaddr,
192 pdev->tx_descs.pool_paddr,
193 cdf_get_dma_mem_context((&pdev->tx_descs), memctx));
194#if defined(HELIUMPLUS_PADDR64)
195 cdf_os_mem_free_consistent(
196 pdev->osdev,
197 /* pool_size */
198 pdev->frag_descs.pool_elems *
199 pdev->frag_descs.size,
200 pdev->frag_descs.pool_vaddr,
201 pdev->frag_descs.pool_paddr,
202 cdf_get_dma_mem_context((&pdev->frag_descs), memctx));
203#endif /* defined(HELIUMPLUS_PADDR64) */
204 }
205}
206
207/*--- descriptor allocation functions ---------------------------------------*/
208
/**
 * htt_tx_desc_alloc() - pop a tx descriptor from the freelist and program
 *	its fragmentation-descriptor pointer field
 * @pdev: HTT pdev that owns the descriptor pool
 * @paddr_lo: output; physical address of the host tx descriptor,
 *	including the HTC frame header headroom
 *
 * Return: pointer to the HTT tx MSDU descriptor (i.e. past the HTC
 * header headroom), or NULL if the pool is exhausted.
 */
void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo)
{
	struct htt_host_tx_desc_t *htt_host_tx_desc;	/* includes HTC hdr */
	struct htt_tx_msdu_desc_t *htt_tx_desc;	/* doesn't include HTC hdr */
	uint16_t index;
	uint32_t *fragmentation_descr_field_ptr;

	htt_host_tx_desc = (struct htt_host_tx_desc_t *)pdev->tx_descs.freelist;
	if (!htt_host_tx_desc)
		return NULL;	/* pool is exhausted */

	htt_tx_desc = &htt_host_tx_desc->align32.tx_desc;

	/* pop the freelist head (known non-NULL from the check above) */
	if (pdev->tx_descs.freelist) {
		pdev->tx_descs.freelist =
			*((uint32_t **) pdev->tx_descs.freelist);
		pdev->tx_descs.alloc_cnt++;
	}
	/*
	 * For LL, set up the fragmentation descriptor address.
	 * Currently, this HTT tx desc allocation is performed once up front.
	 * If this is changed to have the allocation done during tx, then it
	 * would be helpful to have separate htt_tx_desc_alloc functions for
	 * HL vs. LL, to remove the below conditional branch.
	 */
	fragmentation_descr_field_ptr = (uint32_t *)
		((uint32_t *) htt_tx_desc) +
		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;

	/* index of this descriptor within the pool (byte offset / size) */
	index = ((char *)htt_host_tx_desc -
		 (char *)(((struct htt_host_tx_desc_t *)
			   pdev->tx_descs.pool_vaddr))) /
		pdev->tx_descs.size;
	/*
	 * The fragmentation descriptor is allocated from consistent
	 * memory. Therefore, we can use the address directly rather
	 * than having to map it from a virtual/CPU address to a
	 * physical/bus address.
	 */
#if defined(HELIUMPLUS_PADDR64)
#if HTT_PADDR64
	/* this is: frags_desc_ptr.lo */
	*fragmentation_descr_field_ptr = (uint32_t)
		(pdev->frag_descs.pool_paddr +
		 (pdev->frag_descs.size * index));
	fragmentation_descr_field_ptr++;
	/* frags_desc_ptr.hi — zero; only the low 32 bits are programmed */
	*fragmentation_descr_field_ptr = 0;
#else /* ! HTT_PADDR64 */
	*fragmentation_descr_field_ptr = (uint32_t)
		(pdev->frag_descs.pool_paddr +
		 (pdev->frag_descs.size * index));
	cdf_print("%s %d: i %d frag_paddr 0x%x\n",
		  __func__, __LINE__, index,
		  (*fragmentation_descr_field_ptr));
#endif /* HTT_PADDR64 */
#else /* !HELIUMPLUS_PADDR64 */
	/* frags immediately follow the tx descriptor in the same pool elem */
	*fragmentation_descr_field_ptr =
		HTT_TX_DESC_PADDR(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
#endif /* HELIUMPLUS_PADDR64 */

	/*
	 * Include the headroom for the HTC frame header when specifying the
	 * physical address for the HTT tx descriptor.
	 */
	*paddr_lo = (uint32_t) HTT_TX_DESC_PADDR(pdev, htt_host_tx_desc);
	/*
	 * The allocated tx descriptor space includes headroom for a
	 * HTC frame header. Hide this headroom, so that we don't have
	 * to jump past the headroom each time we program a field within
	 * the tx desc, but only once when we download the tx desc (and
	 * the headroom) to the target via HTC.
	 * Skip past the headroom and return the address of the HTT tx desc.
	 */
	return (void *)htt_tx_desc;
}
285
286void htt_tx_desc_free(htt_pdev_handle pdev, void *tx_desc)
287{
288 char *htt_host_tx_desc = tx_desc;
289 /* rewind over the HTC frame header space */
290 htt_host_tx_desc -=
291 offsetof(struct htt_host_tx_desc_t, align32.tx_desc);
292 *((uint32_t **) htt_host_tx_desc) = pdev->tx_descs.freelist;
293 pdev->tx_descs.freelist = (uint32_t *) htt_host_tx_desc;
294 pdev->tx_descs.alloc_cnt--;
295}
296
297/*--- descriptor field access methods ---------------------------------------*/
298
299void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
300 void *htt_tx_desc,
301 uint32_t paddr,
302 uint32_t frag_desc_paddr_lo,
303 int reset)
304{
305 uint32_t *fragmentation_descr_field_ptr;
306
307 fragmentation_descr_field_ptr = (uint32_t *)
308 ((uint32_t *) htt_tx_desc) +
309 HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
310 if (reset) {
311#if defined(HELIUMPLUS_PADDR64)
312 *fragmentation_descr_field_ptr = frag_desc_paddr_lo;
313#else
314 *fragmentation_descr_field_ptr =
315 HTT_TX_DESC_PADDR(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
316#endif
317 } else {
318 *fragmentation_descr_field_ptr = paddr;
319 }
320}
321
322#if defined(HELIUMPLUS_PADDR64)
323void *
324htt_tx_frag_alloc(htt_pdev_handle pdev,
325 u_int16_t index,
326 u_int32_t *frag_paddr_lo)
327{
328 /** Index should never be 0, since its used by the hardware
329 to terminate the link. */
330 if (index >= pdev->tx_descs.pool_elems)
331 return NULL;
332
333 *frag_paddr_lo = (uint32_t)
334 (pdev->frag_descs.pool_paddr + (pdev->frag_descs.size * index));
335
336 return ((char *) pdev->frag_descs.pool_vaddr) +
337 (pdev->frag_descs.size * index);
338}
339#endif /* defined(HELIUMPLUS_PADDR64) */
340
341/* PUT THESE AS INLINE IN ol_htt_tx_api.h */
342
/* Stub: flagging a tx descriptor as postponed is a no-op in this build. */
void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc)
{
}
346
/* Discard all tx frames pending in HTC, e.g. on surprise removal. */
void htt_tx_pending_discard(htt_pdev_handle pdev)
{
	htc_flush_surprise_remove(pdev->htc_pdev);
}
351
/* Stub: "more in batch" flagging is a no-op in this build. */
void htt_tx_desc_flag_batch_more(htt_pdev_handle pdev, void *desc)
{
}
355
356/*--- tx send function ------------------------------------------------------*/
357
358#ifdef ATH_11AC_TXCOMPACT
359
/*
 * htt_tx_sched() - retry sending frames that were queued because no CE
 * descriptors were available.  Drains the pdev tx netbuf queue until it
 * is empty or HTC rejects a frame (which is re-queued at the head so
 * ordering is preserved for the next attempt).
 */
void htt_tx_sched(htt_pdev_handle pdev)
{
	cdf_nbuf_t msdu;
	int download_len = pdev->download_len;
	int packet_len;

	HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
	while (msdu != NULL) {
		int not_accepted;
		/* packet length includes HTT tx desc frag added above */
		packet_len = cdf_nbuf_len(msdu);
		if (packet_len < download_len) {
			/*
			 * This case of packet length being less than the
			 * nominal download length can happen for a couple
			 * of reasons:
			 * In HL, the nominal download length is a large
			 * artificial value.
			 * In LL, the frame may not have the optional header
			 * fields accounted for in the nominal download size
			 * (LLC/SNAP header, IPv4 or IPv6 header).
			 *
			 * NOTE(review): download_len is not restored to
			 * pdev->download_len for subsequent iterations, so
			 * one short frame shrinks the download length for
			 * every later frame in this drain — confirm this
			 * is intentional.
			 */
			download_len = packet_len;
		}

		not_accepted =
			htc_send_data_pkt(pdev->htc_pdev, msdu,
					  pdev->htc_endpoint,
					  download_len);
		if (not_accepted) {
			/* put it back at the head; retry on next sched */
			HTT_TX_NBUF_QUEUE_INSERT_HEAD(pdev, msdu);
			return;
		}
		HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
	}
}
398
399int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
400{
401
402 int download_len = pdev->download_len;
403
404 int packet_len;
405
406 /* packet length includes HTT tx desc frag added above */
407 packet_len = cdf_nbuf_len(msdu);
408 if (packet_len < download_len) {
409 /*
410 * This case of packet length being less than the nominal
411 * download length can happen for a couple of reasons:
412 * In HL, the nominal download length is a large artificial
413 * value.
414 * In LL, the frame may not have the optional header fields
415 * accounted for in the nominal download size (LLC/SNAP header,
416 * IPv4 or IPv6 header).
417 */
418 download_len = packet_len;
419 }
420
421 NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_HTT);
422 DPTRACE(cdf_dp_trace(msdu, CDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
423 (uint8_t *)(cdf_nbuf_data(msdu)),
424 sizeof(cdf_nbuf_data(msdu))));
425 if (cdf_nbuf_queue_len(&pdev->txnbufq) > 0) {
426 HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
427 htt_tx_sched(pdev);
428 return 0;
429 }
430
431 cdf_nbuf_trace_update(msdu, "HT:T:");
432 if (htc_send_data_pkt
433 (pdev->htc_pdev, msdu, pdev->htc_endpoint, download_len)) {
434 HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
435 }
436
437 return 0; /* success */
438
439}
440
441cdf_nbuf_t
442htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
443{
444 cdf_print("*** %s curently only applies for HL systems\n", __func__);
445 cdf_assert(0);
446 return head_msdu;
447
448}
449
450int
451htt_tx_send_nonstd(htt_pdev_handle pdev,
452 cdf_nbuf_t msdu,
453 uint16_t msdu_id, enum htt_pkt_type pkt_type)
454{
455 int download_len;
456
457 /*
458 * The pkt_type could be checked to see what L2 header type is present,
459 * and then the L2 header could be examined to determine its length.
460 * But for simplicity, just use the maximum possible header size,
461 * rather than computing the actual header size.
462 */
463 download_len = sizeof(struct htt_host_tx_desc_t)
464 + HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
465 + HTT_TX_HDR_SIZE_802_1Q
466 + HTT_TX_HDR_SIZE_LLC_SNAP
467 + ol_cfg_tx_download_size(pdev->ctrl_pdev);
468 cdf_assert(download_len <= pdev->download_len);
469 return htt_tx_send_std(pdev, msdu, msdu_id);
470}
471
472#else /*ATH_11AC_TXCOMPACT */
473
474#ifdef QCA_TX_HTT2_SUPPORT
475static inline HTC_ENDPOINT_ID
476htt_tx_htt2_get_ep_id(htt_pdev_handle pdev, cdf_nbuf_t msdu)
477{
478 /*
479 * TX HTT2 service mainly for small sized frame and check if
480 * this candidate frame allow or not.
481 */
482 if ((pdev->htc_tx_htt2_endpoint != ENDPOINT_UNUSED) &&
483 cdf_nbuf_get_tx_parallel_dnload_frm(msdu) &&
484 (cdf_nbuf_len(msdu) < pdev->htc_tx_htt2_max_size))
485 return pdev->htc_tx_htt2_endpoint;
486 else
487 return pdev->htc_endpoint;
488}
489#else
490#define htt_tx_htt2_get_ep_id(pdev, msdu) (pdev->htc_endpoint)
491#endif /* QCA_TX_HTT2_SUPPORT */
492
/**
 * htt_tx_send_base() - common send path: wrap one tx frame in an HTC
 *	packet and hand it to HTC
 * @pdev: HTT pdev providing the HTC handle and tx-complete callback
 * @msdu: frame to send; its frag 0 is the HTT tx descriptor attached
 *	earlier (see comment below)
 * @msdu_id: tx descriptor ID, stored in the HTC pkt for completion
 * @download_len: nominal number of bytes to download to the target;
 *	clamped to the actual frame length
 * @more_data: hint to HTC that more frames follow in this batch
 *
 * Return: 0 on success, -ENOBUFS if no HTC packet wrapper is available.
 */
static inline int
htt_tx_send_base(htt_pdev_handle pdev,
		 cdf_nbuf_t msdu,
		 uint16_t msdu_id, int download_len, uint8_t more_data)
{
	struct htt_host_tx_desc_t *htt_host_tx_desc;
	struct htt_htc_pkt *pkt;
	int packet_len;
	HTC_ENDPOINT_ID ep_id;

	/*
	 * The HTT tx descriptor was attached as the prefix fragment to the
	 * msdu netbuf during the call to htt_tx_desc_init.
	 * Retrieve it so we can provide its HTC header space to HTC.
	 */
	htt_host_tx_desc = (struct htt_host_tx_desc_t *)
			   cdf_nbuf_get_frag_vaddr(msdu, 0);

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return -ENOBUFS;	/* failure */

	pkt->msdu_id = msdu_id;
	pkt->pdev_ctxt = pdev->txrx_pdev;

	/* packet length includes HTT tx desc frag added above */
	packet_len = cdf_nbuf_len(msdu);
	if (packet_len < download_len) {
		/*
		 * This case of packet length being less than the nominal
		 * download length can happen for a couple reasons:
		 * In HL, the nominal download length is a large artificial
		 * value.
		 * In LL, the frame may not have the optional header fields
		 * accounted for in the nominal download size (LLC/SNAP header,
		 * IPv4 or IPv6 header).
		 */
		download_len = packet_len;
	}

	/* pick the regular or the TX_HTT2 endpoint for this frame */
	ep_id = htt_tx_htt2_get_ep_id(pdev, msdu);

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       pdev->tx_send_complete_part2,
			       (unsigned char *)htt_host_tx_desc,
			       download_len - HTC_HDR_LENGTH,
			       ep_id,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msdu);

	cdf_nbuf_trace_update(msdu, "HT:T:");
	NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_HTT);
	DPTRACE(cdf_dp_trace(msdu, CDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
			     (uint8_t *)(cdf_nbuf_data(msdu)),
			     sizeof(cdf_nbuf_data(msdu))));
	/* NOTE(review): htc_send_data_pkt's return value is ignored here —
	 * confirm failures are reported via the completion callback. */
	htc_send_data_pkt(pdev->htc_pdev, &pkt->htc_pkt, more_data);

	return 0;	/* success */
}
553
554cdf_nbuf_t
555htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
556{
557 cdf_nbuf_t rejected = NULL;
558 uint16_t *msdu_id_storage;
559 uint16_t msdu_id;
560 cdf_nbuf_t msdu;
561 /*
562 * FOR NOW, iterate through the batch, sending the frames singly.
563 * Eventually HTC and HIF should be able to accept a batch of
564 * data frames rather than singles.
565 */
566 msdu = head_msdu;
567 while (num_msdus--) {
568 cdf_nbuf_t next_msdu = cdf_nbuf_next(msdu);
569 msdu_id_storage = ol_tx_msdu_id_storage(msdu);
570 msdu_id = *msdu_id_storage;
571
572 /* htt_tx_send_base returns 0 as success and 1 as failure */
573 if (htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len,
574 num_msdus)) {
575 cdf_nbuf_set_next(msdu, rejected);
576 rejected = msdu;
577 }
578 msdu = next_msdu;
579 }
580 return rejected;
581}
582
583int
584htt_tx_send_nonstd(htt_pdev_handle pdev,
585 cdf_nbuf_t msdu,
586 uint16_t msdu_id, enum htt_pkt_type pkt_type)
587{
588 int download_len;
589
590 /*
591 * The pkt_type could be checked to see what L2 header type is present,
592 * and then the L2 header could be examined to determine its length.
593 * But for simplicity, just use the maximum possible header size,
594 * rather than computing the actual header size.
595 */
596 download_len = sizeof(struct htt_host_tx_desc_t)
597 + HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
598 + HTT_TX_HDR_SIZE_802_1Q
599 + HTT_TX_HDR_SIZE_LLC_SNAP
600 + ol_cfg_tx_download_size(pdev->ctrl_pdev);
601 return htt_tx_send_base(pdev, msdu, msdu_id, download_len, 0);
602}
603
/* Send one standard frame with the nominal download length, no batching. */
int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
{
	return htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len, 0);
}
608
609#endif /*ATH_11AC_TXCOMPACT */
610#ifdef HTT_DBG
/**
 * htt_tx_desc_display() - dump the fields of an HTT tx MSDU descriptor
 * @tx_desc: the descriptor to print
 *
 * Debug helper; field layout printed depends on HTT_PADDR64.
 */
void htt_tx_desc_display(void *tx_desc)
{
	struct htt_tx_msdu_desc_t *htt_tx_desc;

	htt_tx_desc = (struct htt_tx_msdu_desc_t *)tx_desc;

	/* only works for little-endian */
	cdf_print("HTT tx desc (@ %p):\n", htt_tx_desc);
	cdf_print("  msg type = %d\n", htt_tx_desc->msg_type);
	cdf_print("  pkt subtype = %d\n", htt_tx_desc->pkt_subtype);
	cdf_print("  pkt type = %d\n", htt_tx_desc->pkt_type);
	cdf_print("  vdev ID = %d\n", htt_tx_desc->vdev_id);
	cdf_print("  ext TID = %d\n", htt_tx_desc->ext_tid);
	cdf_print("  postponed = %d\n", htt_tx_desc->postponed);
#if HTT_PADDR64
	cdf_print("  reserved_dword0_bits28 = %d\n", htt_tx_desc->reserved_dword0_bits28);
	cdf_print("  cksum_offload = %d\n", htt_tx_desc->cksum_offload);
	cdf_print("  tx_compl_req= %d\n", htt_tx_desc->tx_compl_req);
#else /* !HTT_PADDR64 */
	cdf_print("  batch more = %d\n", htt_tx_desc->more_in_batch);
#endif /* HTT_PADDR64 */
	cdf_print("  length = %d\n", htt_tx_desc->len);
	cdf_print("  id = %d\n", htt_tx_desc->id);
#if HTT_PADDR64
	cdf_print("  frag desc addr.lo = %#x\n",
		  htt_tx_desc->frags_desc_ptr.lo);
	cdf_print("  frag desc addr.hi = %#x\n",
		  htt_tx_desc->frags_desc_ptr.hi);
	cdf_print("  peerid = %d\n", htt_tx_desc->peerid);
	cdf_print("  chanfreq = %d\n", htt_tx_desc->chanfreq);
#else /* ! HTT_PADDR64 */
	cdf_print("  frag desc addr = %#x\n", htt_tx_desc->frags_desc_ptr);
#endif /* HTT_PADDR64 */
}
645#endif
646
647#ifdef IPA_OFFLOAD
648int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
649 unsigned int uc_tx_buf_sz,
650 unsigned int uc_tx_buf_cnt,
651 unsigned int uc_tx_partition_base)
652{
653 unsigned int tx_buffer_count;
654 cdf_nbuf_t buffer_vaddr;
655 uint32_t buffer_paddr;
656 uint32_t *header_ptr;
657 uint32_t *ring_vaddr;
658 int return_code = 0;
659 unsigned int tx_comp_ring_size;
660
661 /* Allocate CE Write Index WORD */
662 pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
663 cdf_os_mem_alloc_consistent(
664 pdev->osdev,
665 4,
666 &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
667 cdf_get_dma_mem_context(
668 (&pdev->ipa_uc_tx_rsc.tx_ce_idx),
669 memctx));
670 if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
671 cdf_print("%s: CE Write Index WORD alloc fail", __func__);
672 return -ENOBUFS;
673 }
674
675 /* Allocate TX COMP Ring */
676 tx_comp_ring_size = uc_tx_buf_cnt * sizeof(cdf_nbuf_t);
677 pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
678 cdf_os_mem_alloc_consistent(
679 pdev->osdev,
680 tx_comp_ring_size,
681 &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
682 cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
683 tx_comp_base),
684 memctx));
685 if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
686 cdf_print("%s: TX COMP ring alloc fail", __func__);
687 return_code = -ENOBUFS;
688 goto free_tx_ce_idx;
689 }
690
691 cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, tx_comp_ring_size);
692
693 /* Allocate TX BUF vAddress Storage */
694 pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
695 (cdf_nbuf_t *) cdf_mem_malloc(uc_tx_buf_cnt *
696 sizeof(cdf_nbuf_t));
697 if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
698 cdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
699 return_code = -ENOBUFS;
700 goto free_tx_comp_base;
701 }
702 cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
703 uc_tx_buf_cnt * sizeof(cdf_nbuf_t));
704
705 ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
706 /* Allocate TX buffers as many as possible */
707 for (tx_buffer_count = 0;
708 tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
709 buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
710 uc_tx_buf_sz, 0, 4, false);
711 if (!buffer_vaddr) {
712 cdf_print("%s: TX BUF alloc fail, loop index: %d",
713 __func__, tx_buffer_count);
714 return 0;
715 }
716
717 /* Init buffer */
718 cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
719 header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);
720
721 /* HTT control header */
722 *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
723 header_ptr++;
724
725 /* PKT ID */
726 *header_ptr |= ((uint16_t) uc_tx_partition_base +
727 tx_buffer_count) << 16;
728
729 cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
730 buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
731 header_ptr++;
732
733 /* Frag Desc Pointer */
734 /* 64bits descriptor, Low 32bits */
735 *header_ptr = (uint32_t) (buffer_paddr + 20);
736 header_ptr++;
737
738 /* 64bits descriptor, high 32bits */
739 *header_ptr = 0;
740 header_ptr++;
741
742 /* chanreq, peerid */
743 *header_ptr = 0xFFFFFFFF;
744
745 /* FRAG Header */
746 /* 6 words TSO header */
747 header_ptr += 6;
748 *header_ptr = buffer_paddr + 64;
749
750 *ring_vaddr = buffer_paddr;
751 pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
752 buffer_vaddr;
753 /* Memory barrier to ensure actual value updated */
754
755 ring_vaddr += 2;
756 }
757
758 pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
759
760 return 0;
761
762free_tx_comp_base:
763 cdf_os_mem_free_consistent(pdev->osdev,
764 ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->
765 ctrl_pdev) * 4,
766 pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
767 pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
768 cdf_get_dma_mem_context((&pdev->
769 ipa_uc_tx_rsc.
770 tx_comp_base),
771 memctx));
772free_tx_ce_idx:
773 cdf_os_mem_free_consistent(pdev->osdev,
774 4,
775 pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
776 pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
777 cdf_get_dma_mem_context((&pdev->
778 ipa_uc_tx_rsc.
779 tx_ce_idx),
780 memctx));
781 return return_code;
782}
783
/**
 * htt_tx_ipa_uc_detach() - release IPA micro-controller tx resources
 * @pdev: HTT pdev whose IPA uc tx resources are to be freed
 *
 * Frees the CE write-index word, the tx completion ring, every tx
 * buffer recorded in the vaddr storage array (up to alloc_tx_buf_cnt),
 * and finally the storage array itself.
 *
 * Return: 0 always.
 */
int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	uint16_t idx;

	if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
		cdf_os_mem_free_consistent(
			pdev->osdev,
			4,
			pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
			pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
			cdf_get_dma_mem_context(
				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
				memctx));
	}

	if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
		/*
		 * NOTE(review): freed size comes from the config maximum,
		 * while the attach path allocated
		 * uc_tx_buf_cnt * sizeof(cdf_nbuf_t) — confirm these agree.
		 */
		cdf_os_mem_free_consistent(
			pdev->osdev,
			ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
			pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
			pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
						 tx_comp_base),
						memctx));
	}

	/* Free each single buffer */
	for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
		if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
			/*
			 * NOTE(review): buffers are mapped
			 * CDF_DMA_BIDIRECTIONAL in the attach path but
			 * unmapped CDF_DMA_FROM_DEVICE here — confirm the
			 * direction mismatch is intentional.
			 */
			cdf_nbuf_unmap(pdev->osdev,
				       pdev->ipa_uc_tx_rsc.
				       tx_buf_pool_vaddr_strg[idx],
				       CDF_DMA_FROM_DEVICE);
			cdf_nbuf_free(pdev->ipa_uc_tx_rsc.
				      tx_buf_pool_vaddr_strg[idx]);
		}
	}

	/* Free storage */
	cdf_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);

	return 0;
}
827#endif /* IPA_OFFLOAD */
828
829#if defined(FEATURE_TSO)
830void
831htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
832 struct cdf_tso_info_t *tso_info)
833{
834 u_int32_t *word;
835 int i;
836 struct cdf_tso_seg_elem_t *tso_seg = tso_info->curr_seg;
837 struct msdu_ext_desc_t *msdu_ext_desc = (struct msdu_ext_desc_t *)desc;
838
839 word = (u_int32_t *)(desc);
840
841 /* Initialize the TSO flags per MSDU */
842 ((struct msdu_ext_desc_t *)msdu_ext_desc)->tso_flags =
843 tso_seg->seg.tso_flags;
844
845 /* First 24 bytes (6*4) contain the TSO flags */
846 word += 6;
847
848 for (i = 0; i < tso_seg->seg.num_frags; i++) {
849 /* [31:0] first 32 bits of the buffer pointer */
850 *word = tso_seg->seg.tso_frags[i].paddr_low_32;
851 word++;
852 /* [15:0] the upper 16 bits of the first buffer pointer */
853 /* [31:16] length of the first buffer */
854 *word = (tso_seg->seg.tso_frags[i].length << 16);
855 word++;
856 }
857
858 if (tso_seg->seg.num_frags < FRAG_NUM_MAX) {
859 *word = 0;
860 word++;
861 *word = 0;
862 }
863}
864#endif /* FEATURE_TSO */