/*
 * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_atomic.h>    /* qdf_atomic_inc, etc. */
#include <qdf_lock.h>      /* qdf_os_spinlock */
#include <qdf_time.h>      /* qdf_system_ticks, etc. */
#include <qdf_nbuf.h>      /* qdf_nbuf_t */
#include <qdf_net_types.h> /* QDF_NBUF_TX_EXT_TID_INVALID */

#include "queue.h"         /* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <enet.h>          /* ethernet_hdr_t, etc. */
#include <ipv6_defs.h>     /* ipv6_traffic_class */
#endif

#include <ol_txrx_api.h>   /* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h> /* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h> /* htt_tx_status */

#include <ol_ctrl_txrx_api.h>
#include <cdp_txrx_tx_delay.h>
#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc */
#include <ol_tx_desc.h>    /* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ol_tx_classify.h> /* ol_tx_dest_addr_find */
#endif
#include <ol_txrx_internal.h> /* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h>         /* ol_tx_reinject */
#include <ol_tx_send.h>

#include <ol_cfg.h>        /* ol_cfg_is_high_latency */
#include <ol_tx_sched.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h> /* OL_TX_RESTORE_HDR, etc */
#endif
#include <ol_tx_queue.h>
#include <ol_txrx.h>
#include <pktlog_ac_fmt.h>
#include <cdp_txrx_handle.h>
#include <wlan_reg_services_api.h>
#include "qdf_hrtimer.h"

#ifdef QCA_HL_NETDEV_FLOW_CONTROL
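/*
 * Per-connection-mode tx descriptor allotment table used by HL netdev
 * flow control, indexed by the TXRX_FC_* connection-mode identifiers.
 */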
static u16 ol_txrx_tx_desc_alloc_table[TXRX_FC_MAX] = {
	[TXRX_FC_5GH_80M_2x2] = 2000,
	[TXRX_FC_2GH_40M_2x2] = 800,
};
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */

u_int16_t
ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
{
	uint16_t desc_pool_size;
	uint16_t steady_state_tx_lifetime_ms;
	uint16_t safety_factor;

	/*
	 * Steady-state tx latency:
	 *     roughly 1-2 ms flight time
	 *   + roughly 1-2 ms prep time,
	 *   + roughly 1-2 ms target->host notification time.
	 * = roughly 6 ms total
	 * Thus, steady state number of frames =
	 * steady state max throughput / frame size * tx latency, e.g.
	 * 1 Gbps / 1500 bytes * 6 ms = 500
	 *
	 */
	steady_state_tx_lifetime_ms = 6;

	safety_factor = 8;

	desc_pool_size =
		ol_cfg_max_thruput_mbps(ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
		(8 * OL_TX_AVG_FRM_BYTES) *
		steady_state_tx_lifetime_ms *
		safety_factor;
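	/*
	 * Illustrative example (assuming OL_TX_AVG_FRM_BYTES is on the
	 * order of 1500 bytes): a configured max throughput of 800 Mbps
	 * gives 800 * 1000 / (8 * 1500) * 6 * 8 ~= 3200 descriptors
	 * before the min/max clamping below.
	 */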

	/* minimum */
	if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;

	/* maximum */
	if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;

	return desc_pool_size;
}

#ifdef CONFIG_TX_DESC_HI_PRIO_RESERVE

/**
 * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
 *                         for a HL system.
 * @pdev: the data physical device sending the data
 * @vdev: the virtual device sending the data
 * @msdu: the tx frame
 * @msdu_info: the tx meta data
 *
 * Return: the tx descriptor
 */
static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
					 struct ol_txrx_vdev_t *vdev,
					 qdf_nbuf_t msdu,
					 struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
	    TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
		tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
	} else if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		     QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		     QDF_NBUF_CB_PACKET_TYPE_EAPOL)) {
			tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
			ol_txrx_info("Got tx desc from resv pool\n");
		}
	}
	return tx_desc;
}

#elif defined(QCA_HL_NETDEV_FLOW_CONTROL)
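/**
 * ol_tx_desc_is_high_prio() - check whether a tx frame should be treated
 *                             as high priority (DHCP, EAPOL, ARP and
 *                             ICMPv6 NA/NS frames) for netdev flow control
 * @msdu: the tx frame
 *
 * Return: true if the frame is high priority
 */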
bool ol_tx_desc_is_high_prio(qdf_nbuf_t msdu)
{
	enum qdf_proto_subtype proto_subtype;
	bool high_prio = false;

	if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		     QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		     QDF_NBUF_CB_PACKET_TYPE_EAPOL))
			high_prio = true;
	} else if (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		   QDF_NBUF_CB_PACKET_TYPE_ARP) {
		high_prio = true;
	} else if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		    QDF_NBUF_CB_PACKET_TYPE_ICMPv6)) {
		proto_subtype = qdf_nbuf_get_icmpv6_subtype(msdu);
		switch (proto_subtype) {
		case QDF_PROTO_ICMPV6_NA:
		case QDF_PROTO_ICMPV6_NS:
			high_prio = true;
			break;
		default:
			high_prio = false;
		}
	}
	return high_prio;
}

static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
					 struct ol_txrx_vdev_t *vdev,
					 qdf_nbuf_t msdu,
					 struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc =
		ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);

	if (!tx_desc)
		return NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	/* return if TX flow control is disabled */
	if (vdev->tx_desc_limit == 0) {
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return tx_desc;
	}

	if (!qdf_atomic_read(&vdev->os_q_paused) &&
	    (qdf_atomic_read(&vdev->tx_desc_count) >= vdev->queue_stop_th)) {
		/*
		 * Pause the normal-priority netdev queues if the tx desc
		 * count crosses the stop threshold.
		 */
		pdev->pause_cb(vdev->vdev_id,
			       WLAN_STOP_NON_PRIORITY_QUEUE,
			       WLAN_DATA_FLOW_CONTROL);
		qdf_atomic_set(&vdev->os_q_paused, 1);
	} else if (ol_tx_desc_is_high_prio(msdu) && !vdev->prio_q_paused &&
		   (qdf_atomic_read(&vdev->tx_desc_count)
		    == vdev->tx_desc_limit)) {
		/* Pause high priority queue */
		pdev->pause_cb(vdev->vdev_id,
			       WLAN_NETIF_PRIORITY_QUEUE_OFF,
			       WLAN_DATA_FLOW_CONTROL_PRIORITY);
		vdev->prio_q_paused = 1;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	return tx_desc;
}

#else

static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
					 struct ol_txrx_vdev_t *vdev,
					 qdf_nbuf_t msdu,
					 struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
	return tx_desc;
}
#endif

#ifdef QCA_HL_NETDEV_FLOW_CONTROL
/**
 * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
 *                               margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold low
 */
static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;

	/*
	 * 5% margin of unallocated desc is too much for per
	 * vdev mechanism.
	 * Define the value separately.
	 */
	threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;

	return threshold_low;
}

/**
 * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
 *                               during tx desc margin replenishment
 * @desc_pool_size: tx desc pool size
 *
 * Return: threshold high
 */
static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors,
	 * keep going until there's a 7.5% margin
	 */
	threshold_high = ((15 * desc_pool_size) / 100) / 2;

	return threshold_high;
}

#else

static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;
	/* always maintain a 5% margin of unallocated descriptors */
	threshold_low = (5 * desc_pool_size) / 100;

	return threshold_low;
}

static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/* when freeing up descriptors, keep going until
	 * there's a 15% margin
	 */
	threshold_high = (15 * desc_pool_size) / 100;

	return threshold_high;
}
#endif

void ol_tx_init_pdev(ol_txrx_pdev_handle pdev)
{
	uint16_t desc_pool_size, i;

	desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);

	qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
	qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);

	pdev->tx_queue.rsrc_threshold_lo =
		ol_txrx_rsrc_threshold_lo(desc_pool_size);
	pdev->tx_queue.rsrc_threshold_hi =
		ol_txrx_rsrc_threshold_hi(desc_pool_size);

	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++)
		qdf_atomic_init(&pdev->txq_grps[i].credit);

	ol_tx_target_credit_init(pdev, desc_pool_size);
}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
				      ol_txrx_vdev_handle vdev,
				      struct ol_tx_desc_t *tx_desc,
				      qdf_nbuf_t msdu,
				      struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	if (OL_TX_ENCAP(vdev, tx_desc, msdu, tx_msdu_info) != A_OK) {
		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
		if (tx_msdu_info->peer) {
			/* remove the peer reference added above */
			ol_txrx_peer_release_ref(tx_msdu_info->peer,
						 PEER_DEBUG_ID_OL_INTERNAL);
		}
		return -EINVAL;
	}

	return 0;
}
#else
static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
				      ol_txrx_vdev_handle vdev,
				      struct ol_tx_desc_t *tx_desc,
				      qdf_nbuf_t msdu,
				      struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	/* no-op */
	return 0;
}
#endif

/**
 * parse_ocb_tx_header() - Check for an OCB TX control header on a packet
 *                         and extract it if present
 * @msdu: Pointer to OS packet (qdf_nbuf_t)
 * @tx_ctrl: Pointer to the extracted TX control header, if present
 *
 * Return: true if OCB parsing is successful
 */
#ifdef WLAN_FEATURE_DSRC
#define OCB_HEADER_VERSION 1
static bool parse_ocb_tx_header(qdf_nbuf_t msdu,
				struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	qdf_ether_header_t *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if TX control header is present */
	eth_hdr_p = (qdf_ether_header_t *)qdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do.. */
		return true;

	/* Remove the ethernet header */
	qdf_nbuf_pull_head(msdu, sizeof(qdf_ether_header_t));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		return false;
	}

	/* Remove the TX control header */
	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}
#else
static bool parse_ocb_tx_header(qdf_nbuf_t msdu,
				struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	return true;
}
#endif

/**
 * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
 *                                for a management frame
 * @pdev: the data physical device sending the data
 * @vdev: the virtual device sending the data
 * @tx_mgmt_frm: the tx management frame
 * @tx_msdu_info: the tx meta data
 *
 * Return: the tx descriptor
 */
struct ol_tx_desc_t *
ol_txrx_mgmt_tx_desc_alloc(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	tx_msdu_info->htt.action.tx_comp_req = 1;
	tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
	return tx_desc;
}

/**
 * ol_txrx_mgmt_send_frame() - send a management frame
 * @vdev: virtual device sending the frame
 * @tx_desc: tx desc
 * @tx_mgmt_frm: management frame to send
 * @tx_msdu_info: the tx meta data
 * @chanfreq: channel frequency to transmit on
 *
 * Return:
 *      0 -> the frame is accepted for transmission, -OR-
 *      1 -> the frame was not accepted
 */
int ol_txrx_mgmt_send_frame(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info,
	uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_frms_queue_t *txq;
	int status = 1;

	/*
	 * 1.  Look up the peer and queue the frame in the peer's mgmt queue.
	 * 2.  Invoke the download scheduler.
	 */
	txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
	if (!txq) {
		/* TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
		 * msdu);
		 */
		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
		ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
					     1 /* error */);
		goto out; /* can't accept the tx mgmt frame */
	}
	/* Initialize the HTT tx desc l2 header offset field.
	 * Even though tx encap does not apply to mgmt frames,
	 * htt_tx_desc_mpdu_header still needs to be called,
	 * to specify that there was no L2 header added by tx encap,
	 * so the frame's length does not need to be adjusted to account for
	 * an added L2 header.
	 */
	htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
	if (qdf_unlikely(htt_tx_desc_init(
			pdev->htt_pdev, tx_desc->htt_tx_desc,
			tx_desc->htt_tx_desc_paddr,
			ol_tx_desc_id(pdev, tx_desc),
			tx_mgmt_frm,
			&tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0)))
		goto out;
	htt_tx_desc_display(tx_desc->htt_tx_desc);
	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);

	ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
	ol_tx_sched(vdev->pdev);
	status = 0;
out:
	if (tx_msdu_info->peer) {
		/* remove the peer reference added above */
		ol_txrx_peer_release_ref(tx_msdu_info->peer,
					 PEER_DEBUG_ID_OL_INTERNAL);
	}

	return status;
}

/**
 * ol_tx_hl_base() - send tx frames for a HL system.
 * @vdev: the virtual device sending the data
 * @tx_spec: indicate what non-standard transmission actions to apply
 * @msdu_list: the tx frames to send
 * @tx_comp_req: tx completion req
 * @call_sched: will schedule the tx if true
 *
 * Return: NULL if all MSDUs are accepted
 */
static inline qdf_nbuf_t
ol_tx_hl_base(
	ol_txrx_vdev_handle vdev,
	enum ol_tx_spec tx_spec,
	qdf_nbuf_t msdu_list,
	int tx_comp_req,
	bool call_sched)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t tx_msdu_info;
	struct ocb_tx_ctrl_hdr_t tx_ctrl;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	tx_msdu_info.tso_info.is_tso = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_frms_queue_t *txq;
		struct ol_tx_desc_t *tx_desc = NULL;

		qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));
		tx_msdu_info.peer = NULL;
		/*
		 * The netbuf will get stored into a (peer-TID) tx queue list
		 * inside the ol_tx_classify_store function or else dropped,
		 * so store the next pointer immediately.
		 */
		next = qdf_nbuf_next(msdu);

		tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);

		if (!tx_desc) {
			/*
			 * If we're out of tx descs, there's no need to try
			 * to allocate tx descs for the remaining MSDUs.
			 */
			TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
						  msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}

		/* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu);*/

		qdf_dp_trace_log_pkt(vdev->vdev_id, msdu, QDF_TX,
				     QDF_TRACE_DEFAULT_PDEV_ID);
		DPTRACE(qdf_dp_trace_data_pkt(msdu, QDF_TRACE_DEFAULT_PDEV_ID,
					      QDF_DP_TRACE_TX_PACKET_RECORD,
					      tx_desc->id, QDF_TX));

		if (tx_spec != OL_TX_SPEC_STD) {
#if defined(FEATURE_WLAN_TDLS)
			if (tx_spec & OL_TX_SPEC_NO_FREE) {
				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
			} else if (tx_spec & OL_TX_SPEC_TSO) {
#else
			if (tx_spec & OL_TX_SPEC_TSO) {
#endif
				tx_desc->pkt_type = OL_TX_FRM_TSO;
			}
			if (ol_txrx_tx_is_raw(tx_spec)) {
				/* CHECK THIS: does this need
				 * to happen after htt_tx_desc_init?
				 */
				/* different types of raw frames */
				u_int8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev,
						 tx_desc->htt_tx_desc,
						 htt_pkt_type_raw,
						 sub_type);
			}
		}

		tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
		tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
		tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;

		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(msdu)
								== 1) {
			tx_msdu_info.htt.action.tx_comp_req = 1;
			tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
		} else {
			tx_msdu_info.htt.action.tx_comp_req = tx_comp_req;
		}

		/* If the vdev is in OCB mode,
		 * parse the tx control header.
		 */
		if (vdev->opmode == wlan_op_mode_ocb) {
			if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
				/* There was an error parsing
				 * the header. Skip this packet.
				 */
				goto MSDU_LOOP_BOTTOM;
			}
		}

		txq = ol_tx_classify(vdev, tx_desc, msdu,
				     &tx_msdu_info);

		/* initialize the HW tx descriptor */
		htt_tx_desc_init(
			pdev->htt_pdev, tx_desc->htt_tx_desc,
			tx_desc->htt_tx_desc_paddr,
			ol_tx_desc_id(pdev, tx_desc),
			msdu,
			&tx_msdu_info.htt,
			&tx_msdu_info.tso_info,
			&tx_ctrl,
			vdev->opmode == wlan_op_mode_ocb);

		if ((!txq) || TX_FILTER_CHECK(&tx_msdu_info)) {
			/* drop this frame,
			 * but try sending subsequent frames
			 */
			/* TXRX_STATS_MSDU_LIST_INCR(pdev,
			 * tx.dropped.no_txq, msdu);
			 */
			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
			if (tx_msdu_info.peer) {
				/* remove the peer reference
				 * added above
				 */
				ol_txrx_peer_release_ref(
					tx_msdu_info.peer,
					PEER_DEBUG_ID_OL_INTERNAL);
			}
			goto MSDU_LOOP_BOTTOM;
		}

		if (tx_msdu_info.peer) {
			/*
			 * If the state is not associated then drop all
			 * the data packets received for that peer
			 */
			if (tx_msdu_info.peer->state ==
			    OL_TXRX_PEER_STATE_DISC) {
				qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
				ol_tx_desc_frame_free_nonstd(pdev,
							     tx_desc,
							     1);
				ol_txrx_peer_release_ref(
					tx_msdu_info.peer,
					PEER_DEBUG_ID_OL_INTERNAL);
				msdu = next;
				continue;
			} else if (tx_msdu_info.peer->state !=
				   OL_TXRX_PEER_STATE_AUTH) {
				if (tx_msdu_info.htt.info.ethertype !=
				    ETHERTYPE_PAE &&
				    tx_msdu_info.htt.info.ethertype
				    != ETHERTYPE_WAI) {
					qdf_atomic_inc(
						&pdev->tx_queue.rsrc_cnt);
					ol_tx_desc_frame_free_nonstd(
						pdev,
						tx_desc, 1);
					ol_txrx_peer_release_ref(
						tx_msdu_info.peer,
						PEER_DEBUG_ID_OL_INTERNAL);
					msdu = next;
					continue;
				}
			}
		}
		/*
		 * Initialize the HTT tx desc l2 header offset field.
		 * htt_tx_desc_mpdu_header needs to be called to make sure
		 * the l2 header size is initialized correctly, to handle
		 * cases where tx encap is disabled or tx encap fails to
		 * perform encap.
		 */
		htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);

		/*
		 * Note: when the driver is built without support for
		 * SW tx encap, the following macro is a no-op.
		 * When the driver is built with support for SW tx
		 * encap, it performs encap, and if an error is
		 * encountered, jumps to the MSDU_LOOP_BOTTOM label.
		 */
		if (ol_tx_encap_wrapper(pdev, vdev, tx_desc, msdu,
					&tx_msdu_info))
			goto MSDU_LOOP_BOTTOM;

		/*
		 * If debug display is enabled, show the meta-data
		 * being downloaded to the target via the
		 * HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);

		ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
		if (tx_msdu_info.peer) {
			OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer,
						msdu);
			/* remove the peer reference added above */
			ol_txrx_peer_release_ref(tx_msdu_info.peer,
						 PEER_DEBUG_ID_OL_INTERNAL);
		}
MSDU_LOOP_BOTTOM:
		msdu = next;
	}

	if (call_sched)
		ol_tx_sched(pdev);
	return NULL; /* all MSDUs were accepted */
}

#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK

/**
 * ol_tx_pdev_reset_driver_del_ack() - reset driver delayed ack enabled flag
 * @ppdev: the data physical device
 *
 * Return: none
 */
void
ol_tx_pdev_reset_driver_del_ack(struct cdp_pdev *ppdev)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev;

	if (!pdev)
		return;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		vdev->driver_del_ack_enabled = false;

		dp_debug("vdev_id %d driver_del_ack_enabled %d",
			 vdev->vdev_id, vdev->driver_del_ack_enabled);
	}
}

/**
 * ol_tx_vdev_set_driver_del_ack_enable() - set driver delayed ack enabled flag
 * @vdev_id: vdev id
 * @rx_packets: number of rx packets
 * @time_in_ms: time in ms
 * @high_th: high threshold
 * @low_th: low threshold
 *
 * Return: none
 */
void
ol_tx_vdev_set_driver_del_ack_enable(uint8_t vdev_id,
				     unsigned long rx_packets,
				     uint32_t time_in_ms,
				     uint32_t high_th,
				     uint32_t low_th)
{
	struct ol_txrx_vdev_t *vdev =
			(struct ol_txrx_vdev_t *)
			ol_txrx_get_vdev_from_vdev_id(vdev_id);
	bool old_driver_del_ack_enabled;

	if ((!vdev) || (low_th > high_th))
		return;

	old_driver_del_ack_enabled = vdev->driver_del_ack_enabled;
	if (rx_packets > high_th)
		vdev->driver_del_ack_enabled = true;
	else if (rx_packets < low_th)
		vdev->driver_del_ack_enabled = false;

	if (old_driver_del_ack_enabled != vdev->driver_del_ack_enabled) {
		dp_debug("vdev_id %d driver_del_ack_enabled %d rx_packets %ld time_in_ms %d high_th %d low_th %d",
			 vdev->vdev_id, vdev->driver_del_ack_enabled,
			 rx_packets, time_in_ms, high_th, low_th);
	}
}

/**
 * ol_tx_hl_send_all_tcp_ack() - send all queued tcp ack packets
 * @vdev: vdev handle
 *
 * Return: none
 */
void ol_tx_hl_send_all_tcp_ack(struct ol_txrx_vdev_t *vdev)
{
	int i;
	struct tcp_stream_node *tcp_node_list;
	struct tcp_stream_node *temp;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
		tcp_node_list = NULL;
		qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
		if (vdev->tcp_ack_hash.node[i].no_of_entries)
			tcp_node_list = vdev->tcp_ack_hash.node[i].head;

		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
		vdev->tcp_ack_hash.node[i].head = NULL;
		qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);

		/* Send all packets */
		while (tcp_node_list) {
			int tx_comp_req = pdev->cfg.default_tx_comp_req ||
						pdev->cfg.request_tx_comp;
			qdf_nbuf_t msdu_list;

			temp = tcp_node_list;
			tcp_node_list = temp->next;

			msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
						  temp->head,
						  tx_comp_req, false);
			if (msdu_list)
				qdf_nbuf_tx_free(msdu_list, 1/*error*/);
			ol_txrx_vdev_free_tcp_node(vdev, temp);
		}
	}
	ol_tx_sched(vdev->pdev);
}

/**
 * tcp_del_ack_tasklet() - tasklet function to send ack packets
 * @data: vdev handle
 *
 * Return: none
 */
void tcp_del_ack_tasklet(void *data)
{
	struct ol_txrx_vdev_t *vdev = data;

	ol_tx_hl_send_all_tcp_ack(vdev);
}

/**
 * ol_tx_get_stream_id() - get stream_id from packet info
 * @info: packet info
 *
 * Return: stream_id
 */
uint16_t ol_tx_get_stream_id(struct packet_info *info)
{
	return ((info->dst_port + info->dst_ip + info->src_port + info->src_ip)
		& (OL_TX_HL_DEL_ACK_HASH_SIZE - 1));
}

/**
 * ol_tx_is_tcp_ack() - check whether the packet is tcp ack frame
 * @msdu: packet
 *
 * Return: true if the packet is tcp ack frame
 */
static bool
ol_tx_is_tcp_ack(qdf_nbuf_t msdu)
{
	uint16_t ether_type;
	uint8_t protocol;
	uint8_t flag, ip_header_len, tcp_header_len;
	uint32_t seg_len;
	uint8_t *skb_data;
	uint32_t skb_len;
	bool tcp_acked = false;
	uint32_t tcp_header_off;

	qdf_nbuf_peek_header(msdu, &skb_data, &skb_len);
	if (skb_len < (QDF_NBUF_TRAC_IPV4_OFFSET +
	    QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
	    QDF_NBUF_TRAC_TCP_FLAGS_OFFSET))
		goto exit;

	ether_type = (uint16_t)(*(uint16_t *)
			(skb_data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
	protocol = (uint16_t)(*(uint16_t *)
			(skb_data + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));

	if ((QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE) == ether_type) &&
	    (protocol == QDF_NBUF_TRAC_TCP_TYPE)) {
		ip_header_len = ((uint8_t)(*(uint8_t *)
				(skb_data + QDF_NBUF_TRAC_IPV4_OFFSET)) &
				QDF_NBUF_TRAC_IPV4_HEADER_MASK) << 2;
		tcp_header_off = QDF_NBUF_TRAC_IPV4_OFFSET + ip_header_len;

		tcp_header_len = ((uint8_t)(*(uint8_t *)
				(skb_data + tcp_header_off +
				QDF_NBUF_TRAC_TCP_HEADER_LEN_OFFSET))) >> 2;
		seg_len = skb_len - tcp_header_len - tcp_header_off;
		flag = (uint8_t)(*(uint8_t *)
				(skb_data + tcp_header_off +
				QDF_NBUF_TRAC_TCP_FLAGS_OFFSET));

		if ((flag == QDF_NBUF_TRAC_TCP_ACK_MASK) && (seg_len == 0))
			tcp_acked = true;
	}

exit:

	return tcp_acked;
}

/**
 * ol_tx_get_packet_info() - update packet info for passed msdu
 * @msdu: packet
 * @info: packet info
 *
 * Return: none
 */
void ol_tx_get_packet_info(qdf_nbuf_t msdu, struct packet_info *info)
{
	uint16_t ether_type;
	uint8_t protocol;
	uint8_t flag, ip_header_len, tcp_header_len;
	uint32_t seg_len;
	uint8_t *skb_data;
	uint32_t skb_len;
	uint32_t tcp_header_off;

	info->type = NO_TCP_PKT;

	qdf_nbuf_peek_header(msdu, &skb_data, &skb_len);
	if (skb_len < (QDF_NBUF_TRAC_IPV4_OFFSET +
	    QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
	    QDF_NBUF_TRAC_TCP_FLAGS_OFFSET))
		return;

	ether_type = (uint16_t)(*(uint16_t *)
			(skb_data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
	protocol = (uint16_t)(*(uint16_t *)
			(skb_data + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));

	if ((QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE) == ether_type) &&
	    (protocol == QDF_NBUF_TRAC_TCP_TYPE)) {
		ip_header_len = ((uint8_t)(*(uint8_t *)
				(skb_data + QDF_NBUF_TRAC_IPV4_OFFSET)) &
				QDF_NBUF_TRAC_IPV4_HEADER_MASK) << 2;
		tcp_header_off = QDF_NBUF_TRAC_IPV4_OFFSET + ip_header_len;

		tcp_header_len = ((uint8_t)(*(uint8_t *)
				(skb_data + tcp_header_off +
				QDF_NBUF_TRAC_TCP_HEADER_LEN_OFFSET))) >> 2;
		seg_len = skb_len - tcp_header_len - tcp_header_off;
		flag = (uint8_t)(*(uint8_t *)
				(skb_data + tcp_header_off +
				QDF_NBUF_TRAC_TCP_FLAGS_OFFSET));

		info->src_ip = QDF_SWAP_U32((uint32_t)(*(uint32_t *)
				(skb_data + QDF_NBUF_TRAC_IPV4_SRC_ADDR_OFFSET)));
		info->dst_ip = QDF_SWAP_U32((uint32_t)(*(uint32_t *)
				(skb_data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET)));
		info->src_port = QDF_SWAP_U16((uint16_t)(*(uint16_t *)
				(skb_data + tcp_header_off +
				QDF_NBUF_TRAC_TCP_SPORT_OFFSET)));
		info->dst_port = QDF_SWAP_U16((uint16_t)(*(uint16_t *)
				(skb_data + tcp_header_off +
				QDF_NBUF_TRAC_TCP_DPORT_OFFSET)));
		info->stream_id = ol_tx_get_stream_id(info);

		if ((flag == QDF_NBUF_TRAC_TCP_ACK_MASK) && (seg_len == 0)) {
			info->type = TCP_PKT_ACK;
			info->ack_number = (uint32_t)(*(uint32_t *)
				(skb_data + tcp_header_off +
				QDF_NBUF_TRAC_TCP_ACK_OFFSET));
			info->ack_number = QDF_SWAP_U32(info->ack_number);
		} else {
			info->type = TCP_PKT_NO_ACK;
		}
	}
}

/**
 * ol_tx_hl_find_and_send_tcp_stream() - find and send tcp stream for passed
 *                                       stream info
 * @vdev: vdev handle
 * @info: packet info
 *
 * Return: none
 */
void ol_tx_hl_find_and_send_tcp_stream(struct ol_txrx_vdev_t *vdev,
				       struct packet_info *info)
{
	uint8_t no_of_entries;
	struct tcp_stream_node *node_to_be_remove = NULL;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	/* remove tcp node from hash */
	qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
			 hash_node_lock);

	no_of_entries = vdev->tcp_ack_hash.node[info->stream_id].
			no_of_entries;
	if (no_of_entries > 1) {
		/* collision case */
		struct tcp_stream_node *head =
			vdev->tcp_ack_hash.node[info->stream_id].head;
		struct tcp_stream_node *temp;

		if ((head->dst_ip == info->dst_ip) &&
		    (head->src_ip == info->src_ip) &&
		    (head->src_port == info->src_port) &&
		    (head->dst_port == info->dst_port)) {
			node_to_be_remove = head;
			vdev->tcp_ack_hash.node[info->stream_id].head =
				head->next;
			vdev->tcp_ack_hash.node[info->stream_id].
				no_of_entries--;
		} else {
			temp = head;
			while (temp->next) {
				if ((temp->next->dst_ip == info->dst_ip) &&
				    (temp->next->src_ip == info->src_ip) &&
				    (temp->next->src_port == info->src_port) &&
				    (temp->next->dst_port == info->dst_port)) {
					node_to_be_remove = temp->next;
					temp->next = temp->next->next;
					vdev->tcp_ack_hash.
						node[info->stream_id].
						no_of_entries--;
					break;
				}
				temp = temp->next;
			}
		}
	} else if (no_of_entries == 1) {
		/* Only one tcp_node */
		node_to_be_remove =
			vdev->tcp_ack_hash.node[info->stream_id].head;
		vdev->tcp_ack_hash.node[info->stream_id].head = NULL;
		vdev->tcp_ack_hash.node[info->stream_id].no_of_entries = 0;
	}
	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.
			   node[info->stream_id].hash_node_lock);

	/* send packets */
	if (node_to_be_remove) {
		int tx_comp_req = pdev->cfg.default_tx_comp_req ||
					pdev->cfg.request_tx_comp;
		qdf_nbuf_t msdu_list;

		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
					  node_to_be_remove->head,
					  tx_comp_req, true);
		if (msdu_list)
			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
		ol_txrx_vdev_free_tcp_node(vdev, node_to_be_remove);
	}
}

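/**
 * ol_tx_hl_rep_tcp_ack() - replace a queued tcp ack for the same stream with
 *                          the newer ack, or detach the stream node whose
 *                          queued packets should now be sent
 * @vdev: vdev handle
 * @msdu: new tcp ack packet
 * @info: packet info for @msdu
 * @is_found: set to true if a node for this stream already exists
 * @start_timer: set to true if the delayed-ack timer should be started
 *
 * Return: the detached stream node to be transmitted, or NULL
 */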
static struct tcp_stream_node *
ol_tx_hl_rep_tcp_ack(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu,
		     struct packet_info *info, bool *is_found,
		     bool *start_timer)
{
	struct tcp_stream_node *node_to_be_remove = NULL;
	struct tcp_stream_node *head =
		vdev->tcp_ack_hash.node[info->stream_id].head;
	struct tcp_stream_node *temp;

	if ((head->dst_ip == info->dst_ip) &&
	    (head->src_ip == info->src_ip) &&
	    (head->src_port == info->src_port) &&
	    (head->dst_port == info->dst_port)) {
		*is_found = true;
		if ((head->ack_number < info->ack_number) &&
		    (head->no_of_ack_replaced <
		     ol_cfg_get_del_ack_count_value(vdev->pdev->ctrl_pdev))) {
			/* replace ack packet */
			qdf_nbuf_tx_free(head->head, 1);
			head->head = msdu;
			head->ack_number = info->ack_number;
			head->no_of_ack_replaced++;
			*start_timer = true;

			vdev->no_of_tcpack_replaced++;

			if (head->no_of_ack_replaced ==
			    ol_cfg_get_del_ack_count_value(
			    vdev->pdev->ctrl_pdev)) {
				node_to_be_remove = head;
				vdev->tcp_ack_hash.node[info->stream_id].head =
					head->next;
				vdev->tcp_ack_hash.node[info->stream_id].
					no_of_entries--;
			}
		} else {
			/* append and send packets */
			head->head->next = msdu;
			node_to_be_remove = head;
			vdev->tcp_ack_hash.node[info->stream_id].head =
				head->next;
			vdev->tcp_ack_hash.node[info->stream_id].
				no_of_entries--;
		}
	} else {
		temp = head;
		while (temp->next) {
			if ((temp->next->dst_ip == info->dst_ip) &&
			    (temp->next->src_ip == info->src_ip) &&
			    (temp->next->src_port == info->src_port) &&
			    (temp->next->dst_port == info->dst_port)) {
				*is_found = true;
				if ((temp->next->ack_number <
				     info->ack_number) &&
				    (temp->next->no_of_ack_replaced <
				     ol_cfg_get_del_ack_count_value(
				     vdev->pdev->ctrl_pdev))) {
					/* replace ack packet */
					qdf_nbuf_tx_free(temp->next->head, 1);
					temp->next->head = msdu;
					temp->next->ack_number =
						info->ack_number;
					temp->next->no_of_ack_replaced++;
					*start_timer = true;

					vdev->no_of_tcpack_replaced++;

					if (temp->next->no_of_ack_replaced ==
					    ol_cfg_get_del_ack_count_value(
					    vdev->pdev->ctrl_pdev)) {
						node_to_be_remove = temp->next;
						temp->next = temp->next->next;
						vdev->tcp_ack_hash.
							node[info->stream_id].
							no_of_entries--;
					}
				} else {
					/* append and send packets */
					temp->next->head->next = msdu;
					node_to_be_remove = temp->next;
					temp->next = temp->next->next;
					vdev->tcp_ack_hash.
						node[info->stream_id].
						no_of_entries--;
				}
				break;
			}
			temp = temp->next;
		}
	}
	return node_to_be_remove;
}

/**
 * ol_tx_hl_find_and_replace_tcp_ack() - find and replace tcp ack packet for
 *                                       passed packet info
 * @vdev: vdev handle
 * @msdu: packet
 * @info: packet info
 *
 * Return: none
 */
void ol_tx_hl_find_and_replace_tcp_ack(struct ol_txrx_vdev_t *vdev,
				       qdf_nbuf_t msdu,
				       struct packet_info *info)
{
	uint8_t no_of_entries;
	struct tcp_stream_node *node_to_be_remove = NULL;
	bool is_found = false, start_timer = false;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	/* replace ack if required or send packets */
	qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
			 hash_node_lock);

	no_of_entries = vdev->tcp_ack_hash.node[info->stream_id].no_of_entries;
	if (no_of_entries > 0) {
		node_to_be_remove = ol_tx_hl_rep_tcp_ack(vdev, msdu, info,
							 &is_found,
							 &start_timer);
	}

	if (no_of_entries == 0 || !is_found) {
		/* Alloc new tcp node */
		struct tcp_stream_node *new_node;

		new_node = ol_txrx_vdev_alloc_tcp_node(vdev);
		if (!new_node) {
			qdf_spin_unlock_bh(&vdev->tcp_ack_hash.
					   node[info->stream_id].hash_node_lock);
			dp_alert("Malloc failed");
			return;
		}
		new_node->stream_id = info->stream_id;
		new_node->dst_ip = info->dst_ip;
		new_node->src_ip = info->src_ip;
		new_node->dst_port = info->dst_port;
		new_node->src_port = info->src_port;
		new_node->ack_number = info->ack_number;
		new_node->head = msdu;
		new_node->next = NULL;
		new_node->no_of_ack_replaced = 0;

		start_timer = true;
		/* insert new_node */
		if (!vdev->tcp_ack_hash.node[info->stream_id].head) {
			vdev->tcp_ack_hash.node[info->stream_id].head =
				new_node;
			vdev->tcp_ack_hash.node[info->stream_id].
				no_of_entries = 1;
		} else {
			struct tcp_stream_node *temp =
				vdev->tcp_ack_hash.node[info->stream_id].head;
			while (temp->next)
				temp = temp->next;

			temp->next = new_node;
			vdev->tcp_ack_hash.node[info->stream_id].
				no_of_entries++;
		}
	}
	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
			   hash_node_lock);

	/* start timer */
	if (start_timer &&
	    (!qdf_atomic_read(&vdev->tcp_ack_hash.is_timer_running))) {
		qdf_hrtimer_start(&vdev->tcp_ack_hash.timer,
				  qdf_ns_to_ktime((
					ol_cfg_get_del_ack_timer_value(
					vdev->pdev->ctrl_pdev) *
					1000000)),
				  __QDF_HRTIMER_MODE_REL);
		qdf_atomic_set(&vdev->tcp_ack_hash.is_timer_running, 1);
	}

	/* send packets */
	if (node_to_be_remove) {
		int tx_comp_req = pdev->cfg.default_tx_comp_req ||
					pdev->cfg.request_tx_comp;
		qdf_nbuf_t msdu_list = NULL;

		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
					  node_to_be_remove->head,
					  tx_comp_req, true);
		if (msdu_list)
			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
		ol_txrx_vdev_free_tcp_node(vdev, node_to_be_remove);
	}
}

/**
 * ol_tx_hl_vdev_tcp_del_ack_timer() - delayed ack timer function
 * @timer: timer handle
 *
 * Return: enum
 */
enum qdf_hrtimer_restart_status
ol_tx_hl_vdev_tcp_del_ack_timer(qdf_hrtimer_data_t *timer)
{
	struct ol_txrx_vdev_t *vdev = qdf_container_of(timer,
						       struct ol_txrx_vdev_t,
						       tcp_ack_hash.timer);
	enum qdf_hrtimer_restart_status ret = __QDF_HRTIMER_NORESTART;

	qdf_sched_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq);
	qdf_atomic_set(&vdev->tcp_ack_hash.is_timer_running, 0);
	return ret;
}

/**
 * ol_tx_hl_del_ack_queue_flush_all() - drop all queued packets
 * @vdev: vdev handle
 *
 * Return: none
 */
void ol_tx_hl_del_ack_queue_flush_all(struct ol_txrx_vdev_t *vdev)
{
	int i;
	struct tcp_stream_node *tcp_node_list;
	struct tcp_stream_node *temp;

	qdf_hrtimer_cancel(&vdev->tcp_ack_hash.timer);
	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
		tcp_node_list = NULL;
		qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);

		if (vdev->tcp_ack_hash.node[i].no_of_entries)
			tcp_node_list = vdev->tcp_ack_hash.node[i].head;

		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
		vdev->tcp_ack_hash.node[i].head = NULL;
		qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);

		/* free all packets */
		while (tcp_node_list) {
			temp = tcp_node_list;
			tcp_node_list = temp->next;

			qdf_nbuf_tx_free(temp->head, 1/*error*/);
			ol_txrx_vdev_free_tcp_node(vdev, temp);
		}
	}
	ol_txrx_vdev_deinit_tcp_del_ack(vdev);
}

/**
 * ol_txrx_vdev_init_tcp_del_ack() - initialize tcp delayed ack structure
 * @vdev: vdev handle
 *
 * Return: none
 */
void ol_txrx_vdev_init_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
{
	int i;

	vdev->driver_del_ack_enabled = false;

	dp_debug("vdev-id=%u, driver_del_ack_enabled=%d",
		 vdev->vdev_id,
		 vdev->driver_del_ack_enabled);

	vdev->no_of_tcpack = 0;
	vdev->no_of_tcpack_replaced = 0;

	qdf_hrtimer_init(&vdev->tcp_ack_hash.timer,
			 ol_tx_hl_vdev_tcp_del_ack_timer,
			 __QDF_CLOCK_MONOTONIC,
			 __QDF_HRTIMER_MODE_REL,
			 QDF_CONTEXT_HARDWARE
			 );
	qdf_create_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq,
		      tcp_del_ack_tasklet,
		      vdev);
	qdf_atomic_init(&vdev->tcp_ack_hash.is_timer_running);
	qdf_atomic_init(&vdev->tcp_ack_hash.tcp_node_in_use_count);
	qdf_spinlock_create(&vdev->tcp_ack_hash.tcp_free_list_lock);
	vdev->tcp_ack_hash.tcp_free_list = NULL;
	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
		qdf_spinlock_create(&vdev->tcp_ack_hash.node[i].hash_node_lock);
		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
		vdev->tcp_ack_hash.node[i].head = NULL;
	}
}

/**
 * ol_txrx_vdev_deinit_tcp_del_ack() - deinitialize tcp delayed ack structure
 * @vdev: vdev handle
 *
 * Return: none
 */
void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
{
	struct tcp_stream_node *temp;

	qdf_destroy_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq);

	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
	while (vdev->tcp_ack_hash.tcp_free_list) {
		temp = vdev->tcp_ack_hash.tcp_free_list;
		vdev->tcp_ack_hash.tcp_free_list = temp->next;
		qdf_mem_free(temp);
	}
	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
}

/**
 * ol_txrx_vdev_free_tcp_node() - add tcp node in free list
 * @vdev: vdev handle
 * @node: tcp stream node
 *
 * Return: none
 */
void ol_txrx_vdev_free_tcp_node(struct ol_txrx_vdev_t *vdev,
				struct tcp_stream_node *node)
{
	qdf_atomic_dec(&vdev->tcp_ack_hash.tcp_node_in_use_count);

	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
	if (vdev->tcp_ack_hash.tcp_free_list) {
		node->next = vdev->tcp_ack_hash.tcp_free_list;
		vdev->tcp_ack_hash.tcp_free_list = node;
	} else {
		vdev->tcp_ack_hash.tcp_free_list = node;
		node->next = NULL;
	}
	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
}

/**
 * ol_txrx_vdev_alloc_tcp_node() - allocate tcp node
 * @vdev: vdev handle
 *
 * Return: tcp stream node
 */
struct tcp_stream_node *ol_txrx_vdev_alloc_tcp_node(struct ol_txrx_vdev_t *vdev)
{
	struct tcp_stream_node *node = NULL;

	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
	if (vdev->tcp_ack_hash.tcp_free_list) {
		node = vdev->tcp_ack_hash.tcp_free_list;
		vdev->tcp_ack_hash.tcp_free_list = node->next;
	}
	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);

	if (!node) {
		/* free list empty: allocate a fresh stream node */
		node = qdf_mem_malloc(sizeof(struct tcp_stream_node));
		if (!node)
			return NULL;
	}
	qdf_atomic_inc(&vdev->tcp_ack_hash.tcp_node_in_use_count);
	return node;
}

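/**
 * ol_tx_hl() - transmit tx frames for a HL system
 * @vdev: the virtual device transmitting the data
 * @msdu_list: the tx frames to send
 *
 * When driver TCP delayed-ack is enabled, pure tcp acks may be held back
 * and coalesced before being handed to ol_tx_hl_base().
 *
 * Return: NULL if all MSDUs are accepted, otherwise the unaccepted MSDUs
 */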
qdf_nbuf_t
ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
				pdev->cfg.request_tx_comp;
	struct packet_info pkt_info;
	qdf_nbuf_t temp;

	if (ol_tx_is_tcp_ack(msdu_list))
		vdev->no_of_tcpack++;

	/* check whether the feature is enabled through the ini */
	if (!ol_cfg_get_del_ack_enable_value(vdev->pdev->ctrl_pdev) ||
	    (!vdev->driver_del_ack_enabled)) {
		if (qdf_atomic_read(&vdev->tcp_ack_hash.tcp_node_in_use_count))
			ol_tx_hl_send_all_tcp_ack(vdev);

		return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
				     tx_comp_req, true);
	}

	ol_tx_get_packet_info(msdu_list, &pkt_info);

	if (pkt_info.type == TCP_PKT_NO_ACK) {
		ol_tx_hl_find_and_send_tcp_stream(vdev, &pkt_info);
		temp = ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
				     tx_comp_req, true);
		return temp;
	}

	if (pkt_info.type == TCP_PKT_ACK) {
		ol_tx_hl_find_and_replace_tcp_ack(vdev, msdu_list, &pkt_info);
		return NULL;
	}

	temp = ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
			     tx_comp_req, true);
	return temp;
}
#else

qdf_nbuf_t
ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
				pdev->cfg.request_tx_comp;

	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
			     msdu_list, tx_comp_req, true);
}
#endif

qdf_nbuf_t ol_tx_non_std_hl(struct ol_txrx_vdev_t *vdev,
			    enum ol_tx_spec tx_spec,
			    qdf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
				pdev->cfg.request_tx_comp;

	if (!tx_comp_req) {
		if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
		    (pdev->tx_data_callback.func))
			tx_comp_req = 1;
	}
	return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req, true);
}

#ifdef FEATURE_WLAN_TDLS
/**
 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
 * @pvdev: the data virtual device
 * @bss_addr: bss address
 *
 * Return: None
 */
void ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
	if (bss_addr && vdev->last_real_peer &&
	    !qdf_mem_cmp((u8 *)bss_addr,
			 vdev->last_real_peer->mac_addr.raw,
			 QDF_MAC_ADDR_SIZE))
		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
			     vdev->last_real_peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
}

/**
 * ol_txrx_add_last_real_peer() - add last peer
 * @ppdev: the data physical device
 * @pvdev: virtual device
 * @peer_id: peer id
 *
 * Return: None
 */
void
ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
			   struct cdp_vdev *pvdev, uint8_t *peer_id)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	ol_txrx_peer_handle peer;

	peer = ol_txrx_find_peer_by_addr(
		(struct cdp_pdev *)pdev,
		vdev->hl_tdls_ap_mac_addr.raw,
		peer_id);

	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
	if (!vdev->last_real_peer && peer &&
	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
		vdev->last_real_peer = peer;
	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
}

/**
 * is_vdev_restore_last_peer() - check for vdev last peer
 * @ppeer: peer object
 *
 * Return: true if the given peer is the vdev's last real peer
 */
bool is_vdev_restore_last_peer(void *ppeer)
{
	struct ol_txrx_peer_t *peer = ppeer;
	struct ol_txrx_vdev_t *vdev;

	vdev = peer->vdev;
	return vdev->last_real_peer && (vdev->last_real_peer == peer);
}

/**
 * ol_txrx_update_last_real_peer() - restore the vdev's last real peer
 * @ppdev: the data physical device
 * @pvdev: the virtual device
 * @peer_id: peer id
 * @restore_last_peer: restore last peer flag
 *
 * Return: None
 */
void ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *pvdev,
				   uint8_t *peer_id, bool restore_last_peer)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	struct ol_txrx_peer_t *peer;

	if (!restore_last_peer)
		return;

	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
					 vdev->hl_tdls_ap_mac_addr.raw,
					 peer_id);

	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
	if (!vdev->last_real_peer && peer &&
	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
		vdev->last_real_peer = peer;
	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
/**
 * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
 * @pdev: the physical device object
 *
 * Return: None
 */
void ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->txq_log_spinlock);
	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
	pdev->txq_log.oldest_record_offset = 0;
	pdev->txq_log.offset = 0;
	pdev->txq_log.allow_wrap = 1;
	pdev->txq_log.wrapped = 0;
}

/**
 * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
void ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->txq_log_spinlock);
}
#endif

#if defined(DEBUG_HL_LOGGING)

/**
 * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
void ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->grp_stat_spinlock);
	pdev->grp_stats.last_valid_index = -1;
	pdev->grp_stats.wrap_around = 0;
}

/**
 * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
 * @pdev: the physical device object
 *
 * Return: None
 */
void ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
}
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
 * @pvdev: the virtual device object
 * @flag: flag
 *
 * Return: None
 */
void
ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *pvdev, bool flag)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	vdev->hlTdlsFlag = flag;
}
#endif

/**
 * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
void ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
{
	uint8_t i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		TAILQ_INIT(&vdev->txqs[i].head);
		vdev->txqs[i].paused_count.total = 0;
		vdev->txqs[i].frms = 0;
		vdev->txqs[i].bytes = 0;
		vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
		vdev->txqs[i].flag = ol_tx_queue_empty;
		/* aggregation is not applicable for vdev tx queues */
		vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
		ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
		ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
	}
}

/**
 * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
 * @vdev: the virtual device object
 *
 * Return: None
 */
void ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_frms_queue_t *txq;
	int i;

	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		txq = &vdev->txqs[i];
		ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
	}
}

/**
 * ol_txrx_peer_txqs_init() - initialise peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
void ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer)
{
	uint8_t i;
	struct ol_txrx_vdev_t *vdev = peer->vdev;

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		TAILQ_INIT(&peer->txqs[i].head);
		peer->txqs[i].paused_count.total = 0;
		peer->txqs[i].frms = 0;
		peer->txqs[i].bytes = 0;
		peer->txqs[i].ext_tid = i;
		peer->txqs[i].flag = ol_tx_queue_empty;
		peer->txqs[i].aggr_state = ol_tx_aggr_untried;
		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
		ol_txrx_set_txq_peer(&peer->txqs[i], peer);
	}
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

	/* aggregation is not applicable for mgmt and non-QoS tx queues */
	for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
		peer->txqs[i].aggr_state = ol_tx_aggr_disabled;

	ol_txrx_peer_pause(peer);
}

/**
 * ol_txrx_peer_tx_queue_free() - free peer tx queues
 * @pdev: the physical device object
 * @peer: peer object
 *
 * Return: None
 */
void ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
				struct ol_txrx_peer_t *peer)
{
	struct ol_tx_frms_queue_t *txq;
	uint8_t i;

	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		txq = &peer->txqs[i];
		ol_tx_queue_free(pdev, txq, i, true);
	}
}

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

/**
 * ol_txrx_update_group_credit() - update group credit for tx queue
 * @group: for which credit needs to be updated
 * @credit: credits
 * @absolute: TXQ group absolute
 *
 * Return: None
 */
void ol_txrx_update_group_credit(
		struct ol_tx_queue_group_t *group,
		int32_t credit,
		u_int8_t absolute)
{
	if (absolute)
		qdf_atomic_set(&group->credit, credit);
	else
		qdf_atomic_add(credit, &group->credit);
}
1766
1767/**
1768 * ol_txrx_update_tx_queue_groups() - update vdev tx queue group if
1769 * vdev id mask and ac mask is not matching
1770 * @pdev: the data physical device
1771 * @group_id: TXQ group id
1772 * @credit: TXQ group credit count
1773 * @absolute: TXQ group absolute
1774 * @vdev_id_mask: TXQ vdev group id mask
1775 * @ac_mask: TQX access category mask
1776 *
1777 * Return: None
1778 */
1779void ol_txrx_update_tx_queue_groups(
1780 ol_txrx_pdev_handle pdev,
1781 u_int8_t group_id,
1782 int32_t credit,
1783 u_int8_t absolute,
1784 u_int32_t vdev_id_mask,
1785 u_int32_t ac_mask
1786 )
1787{
1788 struct ol_tx_queue_group_t *group;
1789 u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
1790 u_int32_t membership;
1791 struct ol_txrx_vdev_t *vdev;
1792
1793 if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
Nirav Shah7c8c1712018-09-10 16:01:31 +05301794 ol_txrx_warn("invalid group_id=%u, ignore update", group_id);
Nirav Shah52d85aa2018-04-26 14:03:00 +05301795 return;
1796 }
1797
1798 group = &pdev->txq_grps[group_id];
1799
1800 membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);
1801
1802 qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
1803 /*
1804 * if the membership (vdev id mask and ac mask)
1805 * matches then no need to update tx qeue groups.
1806 */
1807 if (group->membership == membership)
1808 /* Update Credit Only */
1809 goto credit_update;
1810
1811 credit += ol_txrx_distribute_group_credits(pdev, group_id,
1812 vdev_id_mask);
1813 /*
1814 * membership (vdev id mask and ac mask) is not matching
1815 * TODO: ignoring ac mask for now
1816 */
1817 qdf_assert(ac_mask == 0xffff);
1818 group_vdev_id_mask =
1819 OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
1820
1821 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1822 group_vdev_bit_mask =
1823 OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
1824 group_vdev_id_mask, vdev->vdev_id);
1825 vdev_bit_mask =
1826 OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
1827 vdev_id_mask, vdev->vdev_id);
1828
1829 if (group_vdev_bit_mask != vdev_bit_mask) {
1830 /*
1831 * Change in vdev tx queue group
1832 */
1833 if (!vdev_bit_mask) {
1834 /* Set Group Pointer (vdev and peer) to NULL */
1835				ol_txrx_info("Group membership removed for vdev_id %d from group_id %d",
1836					     vdev->vdev_id, group_id);
1837				ol_tx_set_vdev_group_ptr(
1838 pdev, vdev->vdev_id, NULL);
1839 } else {
1840 /* Set Group Pointer (vdev and peer) */
1841				ol_txrx_info("Group membership updated for vdev_id %d to group_id %d",
1842					     vdev->vdev_id, group_id);
1843				ol_tx_set_vdev_group_ptr(
1844 pdev, vdev->vdev_id, group);
1845 }
1846 }
1847 }
1848 /* Update membership */
1849 group->membership = membership;
1850	ol_txrx_info("Group membership updated for group_id %d membership 0x%x",
1851		     group_id, group->membership);
1852credit_update:
1853 /* Update Credit */
1854 ol_txrx_update_group_credit(group, credit, absolute);
1855 qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1856}
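
/*
 * Illustrative call (editor's sketch, values made up): moving vdev 0 into
 * group 1 with an absolute credit of 100 and all access categories enabled
 * would look roughly like
 *
 *   ol_txrx_update_tx_queue_groups(pdev, 1, 100, 1, 0x1, 0xffff);
 *
 * where 0x1 is the vdev_id_mask and 0xffff the ac_mask expected by the
 * qdf_assert() above.
 */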
1857#endif
1858
1859#if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
1860 defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
1861#define MIN_INIT_GROUP_CREDITS 10
1862int ol_txrx_distribute_group_credits(struct ol_txrx_pdev_t *pdev,
1863 u8 group_id,
1864 u32 vdevid_mask_new)
1865{
1866 struct ol_tx_queue_group_t *grp = &pdev->txq_grps[group_id];
1867 struct ol_tx_queue_group_t *grp_nxt = &pdev->txq_grps[!group_id];
1868 int creds_nxt = qdf_atomic_read(&grp_nxt->credit);
1869 int vdevid_mask = OL_TXQ_GROUP_VDEV_ID_MASK_GET(grp->membership);
1870 int vdevid_mask_othgrp =
1871 OL_TXQ_GROUP_VDEV_ID_MASK_GET(grp_nxt->membership);
1872 int creds_distribute = 0;
1873
1874 /* if vdev added to the group is the first vdev */
1875 if ((vdevid_mask == 0) && (vdevid_mask_new != 0)) {
1876 /* if other group has members */
1877 if (vdevid_mask_othgrp) {
1878 if (creds_nxt < MIN_INIT_GROUP_CREDITS)
1879 creds_distribute = creds_nxt / 2;
1880 else
1881 creds_distribute = MIN_INIT_GROUP_CREDITS;
1882
1883 ol_txrx_update_group_credit(grp_nxt, -creds_distribute,
1884 0);
1885 } else {
1886 /*
1887 * Other grp has no members, give all credits to this
1888 * grp.
1889 */
1890 creds_distribute =
1891 qdf_atomic_read(&pdev->target_tx_credit);
1892 }
1893 /* if all vdevs are removed from this grp */
1894 } else if ((vdevid_mask != 0) && (vdevid_mask_new == 0)) {
1895 if (vdevid_mask_othgrp)
1896 /* Transfer credits to other grp */
1897 ol_txrx_update_group_credit(grp_nxt,
1898 qdf_atomic_read(&grp->
1899 credit),
1900 0);
1901 /* Set current grp credits to zero */
1902 ol_txrx_update_group_credit(grp, 0, 1);
1903 }
1904
1905 return creds_distribute;
1906}
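
/*
 * Worked example (editor's illustration, numbers made up): if this group is
 * empty and gains its first vdev while the other group holds 6 credits
 * (less than MIN_INIT_GROUP_CREDITS), the other group is debited
 * 6 / 2 = 3 credits and 3 is returned for the caller to add to this group;
 * had the other group held 40 credits, MIN_INIT_GROUP_CREDITS (10) would be
 * moved instead.
 */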
1907#endif /*
1908 * FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL &&
1909 * FEATURE_HL_DBS_GROUP_CREDIT_SHARING
1910 */
1911
1912#ifdef QCA_HL_NETDEV_FLOW_CONTROL
1913/**
1914 * ol_txrx_register_hl_flow_control() - register hl netdev flow control callback
1915 * @soc: soc handle
1916 * @flowcontrol: flow control (netif pause/unpause) callback
1917 *
1918 * Return: 0 for success or error code
1919 */
1920int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc,
1921				     tx_pause_callback flowcontrol)
1922{
1923	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
1924	u32 desc_pool_size;
1925
1926	if (!pdev || !flowcontrol) {
1927		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1928			  "pdev or pause_cb is NULL");
1929		return QDF_STATUS_E_INVAL;
1930	}
1931	desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
1932
1933	/*
1934	 * Assert that the tx descriptor pool size meets the requirement:
1935	 * a maximum of 2 sessions are allowed on a band.
1936	 */
1937	QDF_ASSERT((2 * ol_txrx_tx_desc_alloc_table[TXRX_FC_5GH_80M_2x2] +
1938		    ol_txrx_tx_desc_alloc_table[TXRX_FC_2GH_40M_2x2])
1939		    <= desc_pool_size);
1940
1941	pdev->pause_cb = flowcontrol;
1942	return 0;
1943}
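
/*
 * Illustrative registration (editor's sketch): a caller would typically pass
 * its pause/unpause handler once the soc is ready, e.g.
 *
 *   if (ol_txrx_register_hl_flow_control(soc, my_pause_handler))
 *       handle_registration_failure();
 *
 * where my_pause_handler is a hypothetical tx_pause_callback supplied by the
 * OS shim and handle_registration_failure() is likewise hypothetical.
 */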
1943
1944int ol_txrx_set_vdev_os_queue_status(u8 vdev_id,
1945 enum netif_action_type action)
1946{
1947 struct ol_txrx_vdev_t *vdev =
1948 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
1949
1950 if (!vdev) {
1951 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1952 "%s: Invalid vdev_id %d", __func__, vdev_id);
1953 return -EINVAL;
1954 }
1955
1956 switch (action) {
1957 case WLAN_NETIF_PRIORITY_QUEUE_ON:
1958 qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
1959 vdev->prio_q_paused = 0;
1960 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
1961 break;
1962 case WLAN_WAKE_NON_PRIORITY_QUEUE:
1963 qdf_atomic_set(&vdev->os_q_paused, 0);
1964 break;
1965 default:
1966 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1967 "%s: Invalid action %d", __func__, action);
1968 return -EINVAL;
1969 }
1970 return 0;
1971}
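
/*
 * Illustrative usage (editor's sketch): a caller resuming a flow-controlled
 * vdev might first re-enable the priority queue and then wake the regular
 * OS queues:
 *
 *   ol_txrx_set_vdev_os_queue_status(vdev_id, WLAN_NETIF_PRIORITY_QUEUE_ON);
 *   ol_txrx_set_vdev_os_queue_status(vdev_id, WLAN_WAKE_NON_PRIORITY_QUEUE);
 */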
1972
1973/**
1974 * ol_txrx_set_vdev_tx_desc_limit() - Set TX descriptor limits for a vdev
1975 * @vdev_id: vdev id for the vdev under consideration.
1976 * @chan: channel on which the vdev has been started
 *
 * Return: 0 on success, -EINVAL if the vdev cannot be found
1977 */
1978int ol_txrx_set_vdev_tx_desc_limit(u8 vdev_id, u8 chan)
1979{
1980 struct ol_txrx_vdev_t *vdev =
1981 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
1982 enum ol_txrx_fc_limit_id fc_limit_id;
1983 u32 td_limit;
1984
1985 if (!vdev) {
1986 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1987 "%s: Invalid vdev_id %d", __func__, vdev_id);
1988 return -EINVAL;
1989 }
1990
1991 /* TODO: Handle no of spatial streams and channel BW */
1992 if (WLAN_REG_IS_5GHZ_CH(chan))
1993 fc_limit_id = TXRX_FC_5GH_80M_2x2;
1994 else
1995 fc_limit_id = TXRX_FC_2GH_40M_2x2;
1996
1997 qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
1998 td_limit = ol_txrx_tx_desc_alloc_table[fc_limit_id];
1999 vdev->tx_desc_limit = td_limit;
2000 vdev->queue_stop_th = td_limit - TXRX_HL_TX_DESC_HI_PRIO_RESERVED;
2001 vdev->queue_restart_th = td_limit - TXRX_HL_TX_DESC_QUEUE_RESTART_TH;
2002 qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
2003
2004 return 0;
2005}
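
/*
 * Editor's note (illustrative reading of the code above): for a vdev started
 * on a 5 GHz channel the limit comes from the TXRX_FC_5GH_80M_2x2 entry of
 * ol_txrx_tx_desc_alloc_table; the stop threshold sits
 * TXRX_HL_TX_DESC_HI_PRIO_RESERVED descriptors below that limit and the
 * restart threshold TXRX_HL_TX_DESC_QUEUE_RESTART_TH below it, presumably so
 * that some descriptors stay reserved for high-priority traffic and the OS
 * queues restart with hysteresis.
 */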
2006
2007void ol_tx_dump_flow_pool_info_compact(void *ctx)
2008{
2009 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2010 char *comb_log_str;
2011 int bytes_written = 0;
2012 uint32_t free_size;
2013 struct ol_txrx_vdev_t *vdev;
2014 int i = 0;
2015
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

2016 free_size = WLAN_MAX_VDEVS * 100;
2017 comb_log_str = qdf_mem_malloc(free_size);
2018 if (!comb_log_str)
2019 return;
2020
2021 qdf_spin_lock_bh(&pdev->tx_mutex);
2022 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2023		bytes_written += snprintf(&comb_log_str[bytes_written],
2024					  free_size - bytes_written,
2025					  "%d (%d,%d)(%d,%d)(%d,%d) |",
2026					  vdev->vdev_id, vdev->tx_desc_limit,
2027					  qdf_atomic_read(&vdev->tx_desc_count),
2028					  qdf_atomic_read(&vdev->os_q_paused),
2029					  vdev->prio_q_paused, vdev->queue_stop_th,
2030					  vdev->queue_restart_th);
2031 }
2032 qdf_spin_unlock_bh(&pdev->tx_mutex);
2033 qdf_nofl_debug("STATS | FC: %s", comb_log_str);
2034
2035 free_size = WLAN_MAX_VDEVS * 100;
2036 bytes_written = 0;
2037 qdf_mem_zero(comb_log_str, free_size);
2038
2039 bytes_written = snprintf(&comb_log_str[bytes_written], free_size,
2040 "%d ",
2041 qdf_atomic_read(&pdev->target_tx_credit));
2042 for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
2043		bytes_written += snprintf(&comb_log_str[bytes_written],
2044					  free_size - bytes_written,
2045					  "|%d, (0x%x, %d)", i,
2046					  OL_TXQ_GROUP_VDEV_ID_MASK_GET(
2047						pdev->txq_grps[i].membership),
2048					  qdf_atomic_read(
2049						&pdev->txq_grps[i].credit));
2050 }
2051 qdf_nofl_debug("STATS | CREDIT: %s", comb_log_str);
2052 qdf_mem_free(comb_log_str);
2053}
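
/*
 * Example of the compact output above (editor's illustration, numbers made
 * up): a line such as
 *
 *   STATS | FC: 0 (1200,34)(0,0)(1190,1100) |
 *
 * reads as vdev 0 with (tx_desc_limit, tx_desc_count), then
 * (os_q_paused, prio_q_paused), then (queue_stop_th, queue_restart_th).
 */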
2054
2055void ol_tx_dump_flow_pool_info(void *ctx)
2056{
2057 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2058 struct ol_txrx_vdev_t *vdev;
2059
2060 if (!pdev) {
2061 ol_txrx_err("pdev is NULL");
2062 return;
2063 }
2064
2065 qdf_spin_lock_bh(&pdev->tx_mutex);
2066 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2067 txrx_nofl_info("vdev_id %d", vdev->vdev_id);
2068 txrx_nofl_info("limit %d available %d stop_threshold %d restart_threshold %d",
2069 vdev->tx_desc_limit,
2070 qdf_atomic_read(&vdev->tx_desc_count),
2071 vdev->queue_stop_th, vdev->queue_restart_th);
2072 txrx_nofl_info("q_paused %d prio_q_paused %d",
2073 qdf_atomic_read(&vdev->os_q_paused),
2074 vdev->prio_q_paused);
2075 }
2076 qdf_spin_unlock_bh(&pdev->tx_mutex);
2077}
2078#endif /* QCA_HL_NETDEV_FLOW_CONTROL */