/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file ol_txrx_types.h
 * @brief Define the major data types used internally by the host datapath SW.
 */
#ifndef _OL_TXRX_TYPES__H_
#define _OL_TXRX_TYPES__H_

#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_mem.h>
#include <cds_queue.h>          /* TAILQ */
#include <a_types.h>            /* A_UINT8 */
#include <htt.h>                /* htt_sec_type, htt_pkt_type, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_t */
#include <wdi_event_api.h>      /* wdi_event_subscribe */
#include <qdf_timer.h>          /* qdf_timer_t */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <pktlog.h>             /* ol_pktlog_dev_handle */
#include <ol_txrx_stats.h>
#include "ol_txrx_htt_api.h"
#include "ol_htt_tx_api.h"
#include "ol_htt_rx_api.h"
#include "ol_txrx_ctrl_api.h"   /* WLAN_MAX_STA_COUNT */
#include "ol_txrx_osif_api.h"   /* ol_rx_callback */
#include "cdp_txrx_flow_ctrl_v2.h"
#include "cdp_txrx_peer_ops.h"
#include <qdf_trace.h>
#include "qdf_hrtimer.h"

/*
 * The target may allocate multiple IDs for a peer.
 * In particular, the target may allocate one ID to represent the
 * multicast key the peer uses, and another ID to represent the
 * unicast key the peer uses.
 */
#define MAX_NUM_PEER_ID_PER_PEER 16

/* OL_TXRX_NUM_EXT_TIDS -
 * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
 */
#define OL_TXRX_NUM_EXT_TIDS 19

#define OL_TX_NUM_QOS_TIDS 16   /* 16 regular TIDs */
#define OL_TX_NON_QOS_TID 16
#define OL_TX_MGMT_TID 17
#define OL_TX_NUM_TIDS 18
#define OL_RX_MCAST_TID 18      /* Mcast TID only between f/w & host */

#define OL_TX_VDEV_MCAST_BCAST 0        /* HTT_TX_EXT_TID_MCAST_BCAST */
#define OL_TX_VDEV_DEFAULT_MGMT 1       /* HTT_TX_EXT_TID_DEFALT_MGMT */
#define OL_TX_VDEV_NUM_QUEUES 2

#define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
#define OL_TXRX_MGMT_NUM_TYPES 8

#define OL_TX_MUTEX_TYPE qdf_spinlock_t
#define OL_RX_MUTEX_TYPE qdf_spinlock_t

/* TXRX Histogram defines */
#define TXRX_DATA_HISTROGRAM_GRANULARITY 1000
#define TXRX_DATA_HISTROGRAM_NUM_INTERVALS 100

#define OL_TXRX_INVALID_VDEV_ID (-1)
#define ETHERTYPE_OCB_TX 0x8151
#define ETHERTYPE_OCB_RX 0x8152

struct ol_txrx_pdev_t;
struct ol_txrx_vdev_t;
struct ol_txrx_peer_t;

/* rx filter related */
#define MAX_PRIVACY_FILTERS 4   /* max privacy filters */

enum privacy_filter {
	PRIVACY_FILTER_ALWAYS,
	PRIVACY_FILTER_KEY_UNAVAILABLE,
};

enum privacy_filter_packet_type {
	PRIVACY_FILTER_PACKET_UNICAST,
	PRIVACY_FILTER_PACKET_MULTICAST,
	PRIVACY_FILTER_PACKET_BOTH
};

struct privacy_exemption {
	/* ethertype -
	 * type of ethernet frames this filter applies to, in host byte order
	 */
	uint16_t ether_type;
	enum privacy_filter filter_type;
	enum privacy_filter_packet_type packet_type;
};
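
/*
 * Illustrative sketch (an editorial addition, not original driver code):
 * a typical exemption entry lets 802.1X/EAPOL handshake frames through
 * while keys are not yet installed.  0x888e is the standard EAPOL
 * ethertype; the variable name and field choices are assumptions made
 * purely for illustration.
 */
#if 0	/* example only */
static const struct privacy_exemption eapol_exemption = {
	.ether_type = 0x888e,	/* EAPOL, host byte order */
	.filter_type = PRIVACY_FILTER_KEY_UNAVAILABLE,
	.packet_type = PRIVACY_FILTER_PACKET_BOTH,
};
#endif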

enum ol_tx_frm_type {
	OL_TX_FRM_STD = 0,	/* regular frame - no added header fragments */
	OL_TX_FRM_TSO,		/* TSO segment, with a modified IP header added */
	OL_TX_FRM_AUDIO,	/* audio frames, with a custom LLC/SNAP hdr added */
	OL_TX_FRM_NO_FREE,	/* frame requires special tx completion callback */
	ol_tx_frm_freed = 0xff,	/* the tx desc is in free list */
};

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)

#define MAX_NO_PEERS_IN_LIMIT (2*10 + 2)

enum ol_tx_peer_bal_state {
	ol_tx_peer_bal_enable = 0,
	ol_tx_peer_bal_disable,
};

enum ol_tx_peer_bal_timer_state {
	ol_tx_peer_bal_timer_disable = 0,
	ol_tx_peer_bal_timer_active,
	ol_tx_peer_bal_timer_inactive,
};

struct ol_tx_limit_peer_t {
	u_int16_t limit_flag;
	u_int16_t peer_id;
	u_int16_t limit;
};

enum tx_peer_level {
	TXRX_IEEE11_B = 0,
	TXRX_IEEE11_A_G,
	TXRX_IEEE11_N,
	TXRX_IEEE11_AC,
	TXRX_IEEE11_AX,
	TXRX_IEEE11_MAX,
};

struct tx_peer_threshold {
	u_int32_t tput_thresh;
	u_int32_t tx_limit;
};
#endif


struct ol_tx_desc_t {
	qdf_nbuf_t netbuf;
	void *htt_tx_desc;
	uint16_t id;
	qdf_dma_addr_t htt_tx_desc_paddr;
	void *htt_frag_desc; /* struct msdu_ext_desc_t * */
	qdf_dma_addr_t htt_frag_desc_paddr;
	qdf_atomic_t ref_cnt;
	enum htt_tx_status status;

#ifdef QCA_COMPUTE_TX_DELAY
	uint32_t entry_timestamp_ticks;
#endif

#ifdef DESC_TIMESTAMP_DEBUG_INFO
	struct {
		uint64_t prev_tx_ts;
		uint64_t curr_tx_ts;
		uint64_t last_comp_ts;
	} desc_debug_info;
#endif

	/*
	 * Allow tx descriptors to be stored in (doubly-linked) lists.
	 * This is mainly used for HL tx queuing and scheduling, but is
	 * also used by LL+HL for batch processing of tx frames.
	 */
	TAILQ_ENTRY(ol_tx_desc_t) tx_desc_list_elem;

	/*
	 * Remember whether the tx frame is a regular packet, or whether
	 * the driver added extra header fragments (e.g. a modified IP header
	 * for TSO fragments, or an added LLC/SNAP header for audio
	 * interworking data) that need to be handled in a special manner.
	 * This field is filled in with the ol_tx_frm_type enum.
	 */
	uint8_t pkt_type;

	u_int8_t vdev_id;

	struct ol_txrx_vdev_t *vdev;

	void *txq;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/*
	 * used by tx encap, to restore the os buf start offset
	 * after tx complete
	 */
	uint8_t orig_l2_hdr_bytes;
#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct ol_tx_flow_pool_t *pool;
#endif
	void *tso_desc;
	void *tso_num_desc;
};

typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;

union ol_tx_desc_list_elem_t {
	union ol_tx_desc_list_elem_t *next;
	struct ol_tx_desc_t tx_desc;
};

union ol_txrx_align_mac_addr_t {
	uint8_t raw[QDF_MAC_ADDR_SIZE];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
};
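
/*
 * Sketch (an editorial addition): the union above exists so that MAC
 * addresses can be compared word-at-a-time rather than byte-at-a-time.
 * The helper name below is hypothetical, for illustration only.
 */
#if 0	/* example only */
static inline int
ol_txrx_mac_addr_aligned_equal(const union ol_txrx_align_mac_addr_t *a,
			       const union ol_txrx_align_mac_addr_t *b)
{
	/* two aligned compares instead of a 6-byte memcmp */
	return a->align4.bytes_abcd == b->align4.bytes_abcd &&
	       a->align4.bytes_ef == b->align4.bytes_ef;
}
#endif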

struct ol_rx_reorder_timeout_list_elem_t {
	TAILQ_ENTRY(ol_rx_reorder_timeout_list_elem_t)
		reorder_timeout_list_elem;
	uint32_t timestamp_ms;
	struct ol_txrx_peer_t *peer;
	uint8_t tid;
	uint8_t active;
};

/* wait on peer deletion timeout value in milliseconds */
#define PEER_DELETION_TIMEOUT 500

enum txrx_wmm_ac {
	TXRX_WMM_AC_BE,
	TXRX_WMM_AC_BK,
	TXRX_WMM_AC_VI,
	TXRX_WMM_AC_VO,

	TXRX_NUM_WMM_AC
};

#define TXRX_TID_TO_WMM_AC(_tid) ( \
	(((_tid) >> 1) == 3) ? TXRX_WMM_AC_VO : \
	(((_tid) >> 1) == 2) ? TXRX_WMM_AC_VI : \
	(((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
	TXRX_WMM_AC_BE)
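
/*
 * For reference (an added note, derived from the macro arithmetic above):
 *	TIDs 0, 3 -> TXRX_WMM_AC_BE
 *	TIDs 1, 2 -> TXRX_WMM_AC_BK
 *	TIDs 4, 5 -> TXRX_WMM_AC_VI
 *	TIDs 6, 7 -> TXRX_WMM_AC_VO
 */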

enum {
	OL_TX_SCHED_WRR_ADV_CAT_BE,
	OL_TX_SCHED_WRR_ADV_CAT_BK,
	OL_TX_SCHED_WRR_ADV_CAT_VI,
	OL_TX_SCHED_WRR_ADV_CAT_VO,
	OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA,
	OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT,
	OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA,
	OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT,

	OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES /* must be last */
};

A_COMPILE_TIME_ASSERT(ol_tx_sched_htt_ac_values,
	/* check that regular WMM AC enum values match */
	((int)OL_TX_SCHED_WRR_ADV_CAT_VO == (int)HTT_AC_WMM_VO) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_VI == (int)HTT_AC_WMM_VI) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_BK == (int)HTT_AC_WMM_BK) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_BE == (int)HTT_AC_WMM_BE) &&

	/* check that extension AC enum values match */
	((int)OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA
		== (int)HTT_AC_EXT_NON_QOS) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT
		== (int)HTT_AC_EXT_UCAST_MGMT) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA
		== (int)HTT_AC_EXT_MCAST_DATA) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT
		== (int)HTT_AC_EXT_MCAST_MGMT));

struct ol_tx_reorder_cat_timeout_t {
	TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
	qdf_timer_t timer;
	uint32_t duration_ms;
	struct ol_txrx_pdev_t *pdev;
};

enum ol_tx_scheduler_status {
	ol_tx_scheduler_idle = 0,
	ol_tx_scheduler_running,
};

enum ol_tx_queue_status {
	ol_tx_queue_empty = 0,
	ol_tx_queue_active,
	ol_tx_queue_paused,
};

struct ol_txrx_msdu_info_t {
	struct htt_msdu_info_t htt;
	struct ol_txrx_peer_t *peer;
	struct qdf_tso_info_t tso_info;
};

enum {
	ol_tx_aggr_untried = 0,
	ol_tx_aggr_enabled,
	ol_tx_aggr_disabled,
	ol_tx_aggr_retry,
	ol_tx_aggr_in_progress,
};

#define OL_TX_MAX_GROUPS_PER_QUEUE 1
#define OL_TX_MAX_VDEV_ID 16
#define OL_TXQ_GROUP_VDEV_ID_MASK_GET(_membership) \
	(((_membership) & 0xffff0000) >> 16)
#define OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(_mask, _vdev_id) \
	((_mask >> _vdev_id) & 0x01)
#define OL_TXQ_GROUP_AC_MASK_GET(_membership) \
	((_membership) & 0x0000ffff)
#define OL_TXQ_GROUP_AC_BIT_MASK_GET(_mask, _ac_mask) \
	((_mask >> _ac_mask) & 0x01)
#define OL_TXQ_GROUP_MEMBERSHIP_GET(_vdev_mask, _ac_mask) \
	((_vdev_mask << 16) | _ac_mask)
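
/*
 * Worked example (an editorial addition): the membership word packs a
 * vdev-ID bitmask into the upper 16 bits and an AC bitmask into the
 * lower 16 bits.
 */
#if 0	/* example only */
{
	/* group containing vdev 3, all four access categories */
	u_int32_t membership = OL_TXQ_GROUP_MEMBERSHIP_GET(1 << 3, 0xf);

	OL_TXQ_GROUP_VDEV_ID_MASK_GET(membership);	/* -> 0x0008 */
	OL_TXQ_GROUP_AC_MASK_GET(membership);		/* -> 0x000f */
	OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(0x0008, 3);	/* -> 1 */
}
#endif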

struct ol_tx_frms_queue_t {
	/* list_elem -
	 * Allow individual tx frame queues to be linked together into
	 * scheduler queues of tx frame queues
	 */
	TAILQ_ENTRY(ol_tx_frms_queue_t) list_elem;
	uint8_t aggr_state;
	struct {
		uint8_t total;
		/* pause requested by ctrl SW rather than txrx SW */
		uint8_t by_ctrl;
	} paused_count;
	uint8_t ext_tid;
	uint16_t frms;
	uint32_t bytes;
	ol_tx_desc_list head;
	enum ol_tx_queue_status flag;
	struct ol_tx_queue_group_t *group_ptrs[OL_TX_MAX_GROUPS_PER_QUEUE];
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
	struct ol_txrx_peer_t *peer;
#endif
};

enum {
	ol_tx_log_entry_type_invalid,
	ol_tx_log_entry_type_queue_state,
	ol_tx_log_entry_type_enqueue,
	ol_tx_log_entry_type_dequeue,
	ol_tx_log_entry_type_drop,
	ol_tx_log_entry_type_queue_free,

	ol_tx_log_entry_type_wrap,
};

struct ol_tx_log_queue_state_var_sz_t {
	uint32_t active_bitmap;
	uint16_t credit;
	uint8_t num_cats_active;
	uint8_t data[1];
};

struct ol_tx_log_queue_add_t {
	uint8_t num_frms;
	uint8_t tid;
	uint16_t peer_id;
	uint16_t num_bytes;
};

struct ol_mac_addr {
	uint8_t mac_addr[QDF_MAC_ADDR_SIZE];
};

struct ol_tx_sched_t;

#ifndef ol_txrx_local_peer_id_t
#define ol_txrx_local_peer_id_t uint8_t /* default */
#endif

#ifdef QCA_COMPUTE_TX_DELAY
/*
 * Delay histogram bins: 16 bins of 10 ms each to count delays
 * from 0-160 ms, plus one overflow bin for delays > 160 ms.
 */
#define QCA_TX_DELAY_HIST_INTERNAL_BINS 17
#define QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS 10
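
/*
 * Binning sketch (an editorial addition; the helper name is hypothetical):
 * a delay of delay_ms falls into bin delay_ms / 10, with anything at or
 * beyond 160 ms landing in the final overflow bin.
 */
#if 0	/* example only */
static inline int qca_tx_delay_hist_bin(uint32_t delay_ms)
{
	int bin = delay_ms / QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS;

	if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
		bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;
	return bin;
}
#endif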

struct ol_tx_delay_data {
	struct {
		uint64_t transmit_sum_ticks;
		uint64_t queue_sum_ticks;
		uint32_t transmit_num;
		uint32_t queue_num;
	} avgs;
	uint16_t hist_bins_queue[QCA_TX_DELAY_HIST_INTERNAL_BINS];
};

#endif /* QCA_COMPUTE_TX_DELAY */

/* Thermal Mitigation */
enum throttle_phase {
	THROTTLE_PHASE_OFF,
	THROTTLE_PHASE_ON,
	/* Invalid */
	THROTTLE_PHASE_MAX,
};

#define THROTTLE_TX_THRESHOLD (100)

/*
 * Thresholds to stop/start the priority queue, in terms of a percentage
 * of the actual flow start and stop thresholds. When the number of
 * available descriptors falls below stop_priority_th, the priority queue
 * is paused. When the number of available descriptors rises above
 * start_priority_th, the priority queue is unpaused.
 */
#define TX_PRIORITY_TH (80)

/*
 * Maximum number of descriptors used by a TSO jumbo packet with
 * 64K aggregation.
 */
#define MAX_TSO_SEGMENT_DESC (44)

struct ol_tx_queue_group_t {
	qdf_atomic_t credit;
	u_int32_t membership;
	int frm_count;
};
#define OL_TX_MAX_TXQ_GROUPS 2

#define OL_TX_GROUP_STATS_LOG_SIZE 128
struct ol_tx_group_credit_stats_t {
	struct {
		struct {
			u_int16_t member_vdevs;
			u_int16_t credit;
		} grp[OL_TX_MAX_TXQ_GROUPS];
	} stats[OL_TX_GROUP_STATS_LOG_SIZE];
	u_int16_t last_valid_index;
	u_int16_t wrap_around;
};


#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED: pool is active (can take/put descriptors)
 *			       and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *			     and network queues are paused
 * @FLOW_POOL_NON_PRIO_PAUSED: non-priority queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_NON_PRIO_PAUSED = 2,
	FLOW_POOL_INVALID = 3,
	FLOW_POOL_INACTIVE = 4
};

/**
 * struct ol_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pool_resize_count: flow pool resize command received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 */
struct ol_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pool_resize_count;
	uint16_t pkt_drop_no_pool;
};

/**
 * struct ol_tx_flow_pool_t - flow_pool info
 * @flow_pool_list_elem: flow_pool_list element
 * @flow_pool_lock: flow_pool lock
 * @flow_pool_id: flow_pool id
 * @flow_pool_size: flow_pool size
 * @avail_desc: available descriptors
 * @deficient_desc: deficient descriptors
 * @overflow_desc: overflow descriptors
 * @status: flow pool status
 * @flow_type: flow pool type
 * @member_flow_id: member flow id
 * @stop_th: stop threshold
 * @start_th: start threshold
 * @freelist: tx descriptor freelist
 * @pkt_drop_no_desc: drop due to no descriptors
 * @ref_cnt: pool's ref count
 * @stop_priority_th: threshold to stop the priority queue
 * @start_priority_th: threshold to start the priority queue
 */
struct ol_tx_flow_pool_t {
	TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
	qdf_spinlock_t flow_pool_lock;
	uint8_t flow_pool_id;
	uint16_t flow_pool_size;
	uint16_t avail_desc;
	uint16_t deficient_desc;
	uint16_t overflow_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
	uint8_t member_flow_id;
	uint16_t stop_th;
	uint16_t start_th;
	union ol_tx_desc_list_elem_t *freelist;
	uint16_t pkt_drop_no_desc;
	qdf_atomic_t ref_cnt;
	uint16_t stop_priority_th;
	uint16_t start_priority_th;
};
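
/*
 * Threshold sketch (an editorial addition; the real pause/resume policy
 * lives in the flow-control implementation and may differ): stop_th and
 * start_th bracket avail_desc with hysteresis, roughly:
 */
#if 0	/* example only */
static inline enum flow_pool_status
ol_tx_flow_pool_eval(const struct ol_tx_flow_pool_t *pool)
{
	if (pool->avail_desc < pool->stop_th)
		return FLOW_POOL_ACTIVE_PAUSED;		/* pause queues */
	if (pool->avail_desc > pool->start_th)
		return FLOW_POOL_ACTIVE_UNPAUSED;	/* resume queues */
	return pool->status;	/* inside the hysteresis band: no change */
}
#endif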
#endif

#define OL_TXRX_INVALID_PEER_UNMAP_COUNT 0xF
/*
 * struct ol_txrx_peer_id_map - Map of firmware peer_ids to peers on host
 * @peer: Pointer to peer object
 * @peer_id_ref_cnt: No. of firmware references to the peer_id
 * @del_peer_id_ref_cnt: No. of outstanding unmap events for peer_id
 *			 after the peer object is deleted on the host.
 * @peer_id_unmap_cnt: No. of unmap events received for this peer_id
 *
 * peer_id is used as an index into the array of ol_txrx_peer_id_map.
 */
struct ol_txrx_peer_id_map {
	struct ol_txrx_peer_t *peer;
	qdf_atomic_t peer_id_ref_cnt;
	qdf_atomic_t del_peer_id_ref_cnt;
	qdf_atomic_t peer_id_unmap_cnt;
};
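
/*
 * Usage note (an editorial addition): the firmware peer_id indexes this
 * array directly, e.g. pdev->peer_id_to_obj_map[peer_id].peer, with
 * peer_id_ref_cnt tracking how many firmware peer_ids still reference
 * the same host peer object.
 */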

/**
 * ol_txrx_stats_req_internal - internal specification of the requested
 * statistics
 */
struct ol_txrx_stats_req_internal {
	struct ol_txrx_stats_req base;
	TAILQ_ENTRY(ol_txrx_stats_req_internal) req_list_elem;
	int serviced; /* state of this request */
	int offset;
};

struct ol_txrx_fw_stats_desc_t {
	struct ol_txrx_stats_req_internal *req;
	unsigned char desc_id;
};

struct ol_txrx_fw_stats_desc_elem_t {
	struct ol_txrx_fw_stats_desc_elem_t *next;
	struct ol_txrx_fw_stats_desc_t desc;
};

/*
 * As depicted in the diagram below, the pdev contains an array of
 * NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
 * Each element identifies all the tx queues that are active for
 * the TID, from the different peers.
 *
 * Each peer contains an array of NUM_EXT_TID ol_tx_frms_queue_t elements.
 * Each element identifies the tx frames for the TID that need to be sent
 * to the peer.
 *
 *
 *  pdev: ol_tx_active_queues_in_tid_t active_in_tids[NUM_EXT_TIDS]
 *                                TID
 *       0            1            2                     17
 *  +============+============+============+==    ==+============+
 *  | active (y) | active (n) | active (n) |        | active (y) |
 *  |------------+------------+------------+--    --+------------|
 *  | queues     | queues     | queues     |        | queues     |
 *  +============+============+============+==    ==+============+
 *       |                                               |
 *    .--+-----------------------------------------------'
 *    |  |
 *    |  |     peer X:                            peer Y:
 *    |  |     ol_tx_frms_queue_t                 ol_tx_frms_queue_t
 *    |  |     tx_queues[NUM_EXT_TIDS]            tx_queues[NUM_EXT_TIDS]
 *    |  | TID +======+                       TID +======+
 *    |  `---->| next |-------------------------->| next |--X
 *    |     0  | prev |   .------.   .------.  0  | prev |   .------.
 *    |        | txq  |-->|txdesc|-->|txdesc|     | txq  |-->|txdesc|
 *    |        +======+   `------'   `------'     +======+   `------'
 *    |        | next |      |          |      1  | next |      |
 *    |     1  | prev |      v          v         | prev |      v
 *    |        | txq  |   .------.   .------.     | txq  |   .------.
 *    |        +======+   |netbuf|   |netbuf|     +======+   |netbuf|
 *    |        | next |   `------'   `------'     | next |   `------'
 *    |     2  | prev |                        2  | prev |
 *    |        | txq  |                           | txq  |
 *    |        +======+                           +======+
 *    |        |  |                               |  |
 *    |
 *    |
 *    |        |  |                               |  |
 *    |        +======+                           +======+
 *    `------->| next |--X                        | next |
 *          17 | prev |   .------.             17 | prev |
 *             | txq  |-->|txdesc|                | txq  |
 *             +======+   `------'                +======+
 *                           |
 *                           v
 *                        .------.
 *                        |netbuf|
 *                        `------'
 */
struct ol_txrx_pdev_t {
	/* ctrl_pdev - handle for querying config info */
	struct cdp_cfg *ctrl_pdev;

	/* osdev - handle for mem alloc / free, map / unmap */
	qdf_device_t osdev;

	htt_pdev_handle htt_pdev;

#ifdef WLAN_FEATURE_FASTPATH
	struct CE_handle *ce_tx_hdl;	/* Handle to Tx packet posting CE */
	struct CE_handle *ce_htt_msg_hdl; /* Handle to TxRx completion CE */
#endif /* WLAN_FEATURE_FASTPATH */

	struct {
		int is_high_latency;
		int host_addba;
		int ll_pause_txq_limit;
		int default_tx_comp_req;
		u8 credit_update_enabled;
		u8 request_tx_comp;
	} cfg;

	/* WDI subscriber's event list */
	wdi_event_subscribe **wdi_event_list;

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
	bool pkt_log_init;
	/* Pktlog pdev */
	struct pktlog_dev_t *pl_dev;
#endif /* #ifndef REMOVE_PKT_LOG */

	/* Monitor mode interface */
	struct ol_txrx_vdev_t *monitor_vdev;

	enum ol_sec_type sec_types[htt_num_sec_types];
	/* standard frame type */
	enum wlan_frm_fmt frame_format;
	enum htt_pkt_type htt_pkt_type;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* txrx encap/decap */
	uint8_t sw_tx_encap;
	uint8_t sw_rx_decap;
	uint8_t target_tx_tran_caps;
	uint8_t target_rx_tran_caps;
	/* llc process */
	uint8_t sw_tx_llc_proc_enable;
	uint8_t sw_rx_llc_proc_enable;
	/* A-MSDU */
	uint8_t sw_subfrm_hdr_recovery_enable;
	/* Protected Frame bit handling */
	uint8_t sw_pf_proc_enable;
#endif
	/*
	 * target tx credit -
	 * not needed for LL, but used for HL download scheduler to keep
	 * track of roughly how much space is available in the target for
	 * tx frames
	 */
	qdf_atomic_t target_tx_credit;
	qdf_atomic_t orig_target_tx_credit;

	struct {
		uint16_t pool_size;
		struct ol_txrx_fw_stats_desc_elem_t *pool;
		struct ol_txrx_fw_stats_desc_elem_t *freelist;
		qdf_spinlock_t pool_lock;
		qdf_atomic_t initialized;
	} ol_txrx_fw_stats_desc_pool;

	/* Peer mac address to staid mapping */
	struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];

	/* ol_txrx_vdev list */
	TAILQ_HEAD(, ol_txrx_vdev_t) vdev_list;

	TAILQ_HEAD(, ol_txrx_stats_req_internal) req_list;
	int req_list_depth;
	qdf_spinlock_t req_list_spinlock;

	/* peer ID to peer object map (array of pointers to peer objects) */
	struct ol_txrx_peer_id_map *peer_id_to_obj_map;

	struct {
		unsigned int mask;
		unsigned int idx_bits;

		TAILQ_HEAD(, ol_txrx_peer_t) *bins;
	} peer_hash;

	/* rx specific processing */
	struct {
		struct {
			TAILQ_HEAD(, ol_rx_reorder_t) waitlist;
			uint32_t timeout_ms;
		} defrag;
		struct {
			int defrag_timeout_check;
			int dup_check;
		} flags;

		struct {
			struct ol_tx_reorder_cat_timeout_t
				access_cats[TXRX_NUM_WMM_AC];
		} reorder_timeout;
		qdf_spinlock_t mutex;
	} rx;

	/* rx proc function */
	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned int tid, qdf_nbuf_t msdu_list);

	/* tx data delivery notification callback function */
	struct {
		ol_txrx_data_tx_cb func;
		void *ctxt;
	} tx_data_callback;

	/* tx management delivery notification callback functions */
	struct {
		ol_txrx_mgmt_tx_cb download_cb;
		ol_txrx_mgmt_tx_cb ota_ack_cb;
		void *ctxt;
	} tx_mgmt_cb;

	data_stall_detect_cb data_stall_detect_callback;
	/* packetdump callback functions */
	ol_txrx_pktdump_cb ol_tx_packetdump_cb;
	ol_txrx_pktdump_cb ol_rx_packetdump_cb;

#ifdef WLAN_FEATURE_TSF_PLUS
	tp_ol_timestamp_cb ol_tx_timestamp_cb;
#endif

	struct {
		uint16_t pool_size;
		uint16_t num_free;
		union ol_tx_desc_list_elem_t *array;
		union ol_tx_desc_list_elem_t *freelist;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
		uint8_t num_invalid_bin;
		qdf_spinlock_t flow_pool_list_lock;
		TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
#endif
		uint32_t page_size;
		uint16_t desc_reserved_size;
		uint8_t page_divider;
		uint32_t offset_filter;
		struct qdf_mem_multi_page_t desc_pages;
#ifdef DESC_DUP_DETECT_DEBUG
		unsigned long *free_list_bitmap;
#endif
#ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
		uint16_t stop_th;
		uint16_t start_th;
		uint16_t stop_priority_th;
		uint16_t start_priority_th;
		enum flow_pool_status status;
#endif
	} tx_desc;

	uint8_t is_mgmt_over_wmi_enabled;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2)
	struct ol_txrx_pool_stats pool_stats;
	uint32_t num_msdu_desc;
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
	struct ol_tx_flow_pool_t *mgmt_pool;
#endif
#endif

	struct {
		int (*cmp)(union htt_rx_pn_t *new,
			   union htt_rx_pn_t *old,
			   int is_unicast, int opmode);
		int len;
	} rx_pn[htt_num_sec_types];

	/* tx mutex */
	OL_TX_MUTEX_TYPE tx_mutex;

	/*
	 * peer ref mutex:
	 * 1. Protect peer object lookups until the returned peer object's
	 *    reference count is incremented.
	 * 2. Provide mutex when accessing peer object lookup structures.
	 */
	OL_RX_MUTEX_TYPE peer_ref_mutex;

	/*
	 * last_real_peer_mutex:
	 * Protect lookups of any vdev's last_real_peer pointer until the
	 * reference count for the pointed-to peer object is incremented.
	 * This mutex could be in the vdev struct, but it's slightly simpler
	 * to have a single lock in the pdev struct. Since the lock is only
	 * held for an extremely short time, and since it's very unlikely for
	 * two vdev's to concurrently access the lock, there's no real
	 * benefit to having a per-vdev lock.
	 */
	OL_RX_MUTEX_TYPE last_real_peer_mutex;

	qdf_spinlock_t peer_map_unmap_lock;

	ol_txrx_peer_unmap_sync_cb peer_unmap_sync_cb;

	struct {
		struct {
			struct {
				struct {
					uint64_t ppdus;
					uint64_t mpdus;
				} normal;
				struct {
					/*
					 * mpdu_bad is general -
					 * replace it with the specific
					 * counters below
					 */
					uint64_t mpdu_bad;
					/* uint64_t mpdu_fcs; */
					/* uint64_t mpdu_duplicate; */
					/* uint64_t mpdu_pn_replay; */
					/* uint64_t mpdu_bad_sender; */
					/* ^ comment: peer not found */
					/* uint64_t mpdu_flushed; */
					/* uint64_t msdu_defrag_mic_err; */
					uint64_t msdu_mc_dup_drop;
				} err;
			} rx;
		} priv;
		struct ol_txrx_stats pub;
	} stats;

#if defined(ENABLE_RX_REORDER_TRACE)
	struct {
		uint32_t mask;
		uint32_t idx;
		uint64_t cnt;
#define TXRX_RX_REORDER_TRACE_SIZE_LOG2 8	/* 256 entries */
		struct {
			uint16_t reorder_idx;
			uint16_t seq_num;
			uint8_t num_mpdus;
			uint8_t tid;
		} *data;
	} rx_reorder_trace;
#endif /* ENABLE_RX_REORDER_TRACE */

#if defined(ENABLE_RX_PN_TRACE)
	struct {
		uint32_t mask;
		uint32_t idx;
		uint64_t cnt;
#define TXRX_RX_PN_TRACE_SIZE_LOG2 5	/* 32 entries */
		struct {
			struct ol_txrx_peer_t *peer;
			uint32_t pn32;
			uint16_t seq_num;
			uint8_t unicast;
			uint8_t tid;
		} *data;
	} rx_pn_trace;
#endif /* ENABLE_RX_PN_TRACE */

	/*
	 * tx_sched only applies for HL, but is defined unconditionally
	 * rather than only if defined(CONFIG_HL_SUPPORT).
	 * This is because the struct only occupies a few bytes, and to
	 * avoid the complexity of wrapping references to the struct
	 * members in "defined(CONFIG_HL_SUPPORT)" conditional compilation.
	 * If this struct gets expanded to a non-trivial size, then it
	 * should be conditionally compiled to only apply if
	 * defined(CONFIG_HL_SUPPORT).
	 */
	qdf_spinlock_t tx_queue_spinlock;
	struct {
		enum ol_tx_scheduler_status tx_sched_status;
		struct ol_tx_sched_t *scheduler;
		struct ol_tx_frms_queue_t *last_used_txq;
	} tx_sched;
	/*
	 * tx_queue only applies for HL, but is defined unconditionally to
	 * avoid wrapping references to tx_queue in
	 * "defined(CONFIG_HL_SUPPORT)" conditional compilation.
	 */
	struct {
		qdf_atomic_t rsrc_cnt;
		/* threshold_lo - when to start tx desc margin replenishment */
		uint16_t rsrc_threshold_lo;
		/*
		 * threshold_hi - where to stop during tx desc margin
		 * replenishment
		 */
		uint16_t rsrc_threshold_hi;
	} tx_queue;

#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
#define OL_TXQ_LOG_SIZE 512
	qdf_spinlock_t txq_log_spinlock;
	struct {
		int size;
		int oldest_record_offset;
		int offset;
		int allow_wrap;
		u_int32_t wrapped;
		/* aligned to u_int32_t boundary */
		u_int8_t data[OL_TXQ_LOG_SIZE];
	} txq_log;
#endif

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
	qdf_spinlock_t peer_stat_mutex;
#endif

	int rssi_update_shift;
	int rssi_new_weight;
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
	struct {
		ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		ol_txrx_local_peer_id_t freelist;
		qdf_spinlock_t lock;
		ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;
#endif

#ifdef QCA_COMPUTE_TX_DELAY
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
#define QCA_TX_DELAY_NUM_CATEGORIES \
	(OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
#else
#define QCA_TX_DELAY_NUM_CATEGORIES 1
#endif
	struct {
		qdf_spinlock_t mutex;
		struct {
			struct ol_tx_delay_data copies[2]; /* ping-pong */
			int in_progress_idx;
			uint32_t avg_start_time_ticks;
		} cats[QCA_TX_DELAY_NUM_CATEGORIES];
		uint32_t tx_compl_timestamp_ticks;
		uint32_t avg_period_ticks;
		uint32_t hist_internal_bin_width_mult;
		uint32_t hist_internal_bin_width_shift;
	} tx_delay;

	uint16_t packet_count[QCA_TX_DELAY_NUM_CATEGORIES];
	uint16_t packet_loss_count[QCA_TX_DELAY_NUM_CATEGORIES];

#endif /* QCA_COMPUTE_TX_DELAY */

	struct {
		qdf_spinlock_t mutex;
		/* timer used to monitor the throttle "on" phase and
		 * "off" phase
		 */
		qdf_timer_t phase_timer;
		/* timer used to send tx frames */
		qdf_timer_t tx_timer;
		/* This is the time in ms of the throttling window, it will
		 * include an "on" phase and an "off" phase
		 */
		uint32_t throttle_period_ms;
		/* Current throttle level set by the client, e.g. level 0,
		 * level 1, etc.
		 */
		enum throttle_level current_throttle_level;
		/* Index that points to the phase within the throttle period */
		enum throttle_phase current_throttle_phase;
		/* Maximum number of frames to send to the target at one time */
		uint32_t tx_threshold;
		/* stores time in ms of on/off phase for each throttle level */
		int throttle_time_ms[THROTTLE_LEVEL_MAX][THROTTLE_PHASE_MAX];
		/* mark true if traffic is paused due to thermal throttling */
		bool is_paused;
	} tx_throttle;

#if defined(FEATURE_TSO)
	struct {
		uint16_t pool_size;
		uint16_t num_free;
		struct qdf_tso_seg_elem_t *freelist;
		/* tso mutex */
		OL_TX_MUTEX_TYPE tso_mutex;
	} tso_seg_pool;
	struct {
		uint16_t num_seg_pool_size;
		uint16_t num_free;
		struct qdf_tso_num_seg_elem_t *freelist;
		/* tso mutex */
		OL_TX_MUTEX_TYPE tso_num_seg_mutex;
	} tso_num_seg_pool;
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
	struct {
		enum ol_tx_peer_bal_state enabled;
		qdf_spinlock_t mutex;
		/* timer used to trigger more frames for bad peers */
		qdf_timer_t peer_bal_timer;
		/* time in ms of the peer balance timer period */
		u_int32_t peer_bal_period_ms;
		/* the txq limit */
		u_int32_t peer_bal_txq_limit;
		/* the state of the peer balance timer */
		enum ol_tx_peer_bal_timer_state peer_bal_timer_state;
		/* number of active peers under tx flow control */
		u_int32_t peer_num;
		/* list of peers under tx flow control */
		struct ol_tx_limit_peer_t limit_list[MAX_NO_PEERS_IN_LIMIT];
		/* threshold configuration */
		struct tx_peer_threshold ctl_thresh[TXRX_IEEE11_MAX];
	} tx_peer_bal;
#endif /* CONFIG_HL_SUPPORT && QCA_BAD_PEER_TX_FLOW_CL */

	struct ol_tx_queue_group_t txq_grps[OL_TX_MAX_TXQ_GROUPS];
#if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
	bool limit_lend;
	u16 min_reserve;
#endif
#ifdef DEBUG_HL_LOGGING
	qdf_spinlock_t grp_stat_spinlock;
	struct ol_tx_group_credit_stats_t grp_stats;
#endif
	int tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES];
	uint8_t ocb_peer_valid;
	struct ol_txrx_peer_t *ocb_peer;
	tx_pause_callback pause_cb;

	void (*offld_flush_cb)(void *);
	struct ol_txrx_peer_t *self_peer;
	qdf_work_t peer_unmap_timer_work;

	/* dp debug fs */
	struct dentry *dpt_stats_log_dir;
	enum qdf_dpt_debugfs_state state;
	struct qdf_debugfs_fops dpt_debugfs_fops;

#ifdef IPA_OFFLOAD
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *usr_ctxt;
	struct ol_txrx_ipa_resources ipa_resource;
#endif /* IPA_UC_OFFLOAD */
	bool new_htt_msg_format;
	uint8_t peer_id_unmap_ref_cnt;
	bool enable_peer_unmap_conf_support;
	bool enable_tx_compl_tsf64;
};

#define OL_TX_HL_DEL_ACK_HASH_SIZE 256

/**
 * enum ol_tx_hl_packet_type - tcp packet classification
 * @TCP_PKT_ACK: TCP ACK frame
 * @TCP_PKT_NO_ACK: TCP frame other than an ACK
 * @NO_TCP_PKT: not a TCP frame
 */
enum ol_tx_hl_packet_type {
	TCP_PKT_ACK,
	TCP_PKT_NO_ACK,
	NO_TCP_PKT
};

/**
 * struct packet_info - tcp packet information
 */
struct packet_info {
	/** @type: flag the packet type */
	enum ol_tx_hl_packet_type type;
	/** @stream_id: stream identifier */
	uint16_t stream_id;
	/** @ack_number: tcp ack number */
	uint32_t ack_number;
	/** @dst_ip: destination ip address */
	uint32_t dst_ip;
	/** @src_ip: source ip address */
	uint32_t src_ip;
	/** @dst_port: destination port */
	uint16_t dst_port;
	/** @src_port: source port */
	uint16_t src_port;
};

/**
 * struct tcp_stream_node - tcp stream node
 */
struct tcp_stream_node {
	/** @next: next tcp stream node */
	struct tcp_stream_node *next;
	/** @no_of_ack_replaced: count of replaced ack frames */
	uint8_t no_of_ack_replaced;
	/** @stream_id: stream identifier */
	uint16_t stream_id;
	/** @dst_ip: destination ip address */
	uint32_t dst_ip;
	/** @src_ip: source ip address */
	uint32_t src_ip;
	/** @dst_port: destination port */
	uint16_t dst_port;
	/** @src_port: source port */
	uint16_t src_port;
	/** @ack_number: tcp ack number */
	uint32_t ack_number;
	/** @head: points to the tcp ack frame */
	qdf_nbuf_t head;
};

/**
 * struct tcp_del_ack_hash_node - hash node for tcp delayed ack
 */
struct tcp_del_ack_hash_node {
	/** @hash_node_lock: spin lock */
	qdf_spinlock_t hash_node_lock;
	/** @no_of_entries: number of entries */
	uint8_t no_of_entries;
	/** @head: the head of the stream node list */
	struct tcp_stream_node *head;
};
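
/*
 * Hashing sketch (an editorial addition; the actual hash used by the
 * delayed-ack code may differ): streams are spread across the
 * OL_TX_HL_DEL_ACK_HASH_SIZE buckets by their flow tuple, e.g.:
 */
#if 0	/* example only */
static inline uint8_t
ol_tx_hl_del_ack_hash(const struct packet_info *info)
{
	uint32_t key = info->dst_ip ^ info->src_ip ^
		       info->dst_port ^ info->src_port;

	return key % OL_TX_HL_DEL_ACK_HASH_SIZE;
}
#endif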

struct ol_txrx_vdev_t {
	struct ol_txrx_pdev_t *pdev;	/* pdev - the physical device that is
					 * the parent of this virtual device
					 */
	uint8_t vdev_id;	/* ID used to specify a particular vdev
				 * to the target
				 */
	void *osif_dev;

	void *ctrl_vdev;	/* vdev objmgr handle */

	union ol_txrx_align_mac_addr_t mac_addr; /* MAC address */
	/* tx paused - NO LONGER NEEDED? */
	TAILQ_ENTRY(ol_txrx_vdev_t) vdev_list_elem; /* node in the pdev's list
						     * of vdevs
						     */
	TAILQ_HEAD(peer_list_t, ol_txrx_peer_t) peer_list;
	struct ol_txrx_peer_t *last_real_peer; /* last real peer created for
						* this vdev (not "self"
						* pseudo-peer)
						*/
	ol_txrx_rx_fp rx;	/* receive function used by this vdev */
	ol_txrx_stats_rx_fp stats_rx; /* stats rx function used by this vdev */

	struct {
		uint32_t txack_success;
		uint32_t txack_failed;
	} txrx_stats;

	/* completion function used by this vdev */
	ol_txrx_completion_fp tx_comp;

	struct {
		/*
		 * If the vdev object couldn't be deleted immediately because
		 * it still had some peer objects left, remember that a delete
		 * was requested, so it can be deleted once all its peers have
		 * been deleted.
		 */
		int pending;
		/*
		 * Store a function pointer and a context argument to provide a
		 * notification for when the vdev is deleted.
		 */
		ol_txrx_vdev_delete_cb callback;
		void *context;
		atomic_t detaching;
	} delete;

	/* safe mode control to bypass the encrypt and decipher process */
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
	struct privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t num_filters;

	enum wlan_op_mode opmode;

#ifdef QCA_IBSS_SUPPORT
	/* ibss mode related */
	int16_t ibss_peer_num;	/* the number of active peers */
	int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
#endif

#if defined(CONFIG_HL_SUPPORT)
	struct ol_tx_frms_queue_t txqs[OL_TX_VDEV_NUM_QUEUES];
#endif

	struct {
		struct {
			qdf_nbuf_t head;
			qdf_nbuf_t tail;
			int depth;
		} txq;
		uint32_t paused_reason;
		qdf_spinlock_t mutex;
		qdf_timer_t timer;
		int max_q_depth;
		bool is_q_paused;
		bool is_q_timer_on;
		uint32_t q_pause_cnt;
		uint32_t q_unpause_cnt;
		uint32_t q_overflow_cnt;
	} ll_pause;
	bool disable_intrabss_fwd;
	qdf_atomic_t os_q_paused;
	uint16_t tx_fl_lwm;
	uint16_t tx_fl_hwm;
	qdf_spinlock_t flow_control_lock;
	ol_txrx_tx_flow_control_fp osif_flow_control_cb;
	ol_txrx_tx_flow_control_is_pause_fp osif_flow_control_is_pause;
	void *osif_fc_ctx;

#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
	/** @driver_del_ack_enabled: true if tcp delayed ack enabled */
	bool driver_del_ack_enabled;
	/** @no_of_tcpack_replaced: number of tcp acks replaced */
	uint32_t no_of_tcpack_replaced;
	/** @no_of_tcpack: number of tcp ack frames */
	uint32_t no_of_tcpack;

	/** @tcp_ack_hash: hash table for tcp delayed-ack state */
	struct {
		/** @node: tcp ack frames are stored in this hash table */
		struct tcp_del_ack_hash_node node[OL_TX_HL_DEL_ACK_HASH_SIZE];
		/** @timer: timeout if no more tcp ack feeding */
		__qdf_hrtimer_data_t timer;
		/** @is_timer_running: is the timer running? */
		qdf_atomic_t is_timer_running;
		/** @tcp_node_in_use_count: number of nodes in use */
		qdf_atomic_t tcp_node_in_use_count;
		/** @tcp_del_ack_tq: bh to handle the tcp delayed ack */
		qdf_bh_t tcp_del_ack_tq;
		/** @tcp_free_list: free list */
		struct tcp_stream_node *tcp_free_list;
		/** @tcp_free_list_lock: spin lock */
		qdf_spinlock_t tcp_free_list_lock;
	} tcp_ack_hash;
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
	union ol_txrx_align_mac_addr_t hl_tdls_ap_mac_addr;
	bool hlTdlsFlag;
#endif

#if defined(QCA_HL_NETDEV_FLOW_CONTROL)
	qdf_atomic_t tx_desc_count;
	int tx_desc_limit;
	int queue_restart_th;
	int queue_stop_th;
	int prio_q_paused;
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */

	uint16_t wait_on_peer_id;
	union ol_txrx_align_mac_addr_t last_peer_mac_addr;
	qdf_event_t wait_delete_comp;
#if defined(FEATURE_TSO)
	struct {
		int pool_elems;	/* total number of elements in the pool */
		int alloc_cnt;	/* number of allocated elements */
		uint32_t *freelist;	/* free list of qdf_tso_seg_elem_t */
	} tso_pool_t;
#endif

	/* last channel change event received */
	struct {
		bool is_valid;	/* whether the rest of the members are valid */
		uint16_t mhz;
		uint16_t band_center_freq1;
		uint16_t band_center_freq2;
		WLAN_PHY_MODE phy_mode;
	} ocb_channel_event;

	/* Information about the OCB channel schedules */
	struct ol_txrx_ocb_chan_info *ocb_channel_info;
	uint32_t ocb_channel_count;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct ol_tx_flow_pool_t *pool;
#endif
	/* intra bss forwarded tx and rx packets count */
	uint64_t fwd_tx_packets;
	uint64_t fwd_rx_packets;
	bool is_wisa_mode_enable;
	uint8_t mac_id;
};

struct ol_rx_reorder_array_elem_t {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};

struct ol_rx_reorder_t {
	uint8_t win_sz;
	uint8_t win_sz_mask;
	uint8_t num_mpdus;
	struct ol_rx_reorder_array_elem_t *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct ol_rx_reorder_array_elem_t base;
#if defined(QCA_SUPPORT_OL_RX_REORDER_TIMEOUT)
	struct ol_rx_reorder_timeout_list_elem_t timeout;
#endif
	/* only used for defrag right now */
	TAILQ_ENTRY(ol_rx_reorder_t) defrag_waitlist_elem;
	uint32_t defrag_timeout_ms;
	/* get back to parent ol_txrx_peer_t when ol_rx_reorder_t is in a
	 * waitlist
	 */
	uint16_t tid;
};

enum {
	txrx_sec_mcast = 0,
	txrx_sec_ucast
};

typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
				      tx_msdu_info);

#define OL_TXRX_PEER_SECURITY_MULTICAST 0
#define OL_TXRX_PEER_SECURITY_UNICAST 1
#define OL_TXRX_PEER_SECURITY_MAX 2


/* Allow 6000 ms to receive peer unmap events after peer is deleted */
#define OL_TXRX_PEER_UNMAP_TIMEOUT (6000)

struct ol_txrx_cached_bufq_t {
	/* cached_bufq is used to enqueue the pending RX frames from a peer
	 * before the peer is registered for data service. The list will be
	 * flushed to HDD once that station is registered.
	 */
	struct list_head cached_bufq;
	/* mutual exclusion lock to access the cached_bufq queue */
	qdf_spinlock_t bufq_lock;
	/* # entries in queue after which subsequent adds will be dropped */
	uint32_t thresh;
	/* # entries present in cached_bufq */
	uint32_t curr;
	/* # max num of entries in the queue if bufq thresh was not in place */
	uint32_t high_water_mark;
	/* # max num of entries in the queue if we did not drop packets */
	uint32_t qdepth_no_thresh;
	/* # of packets (beyond threshold) dropped from cached_bufq */
	uint32_t dropped;
};

struct ol_txrx_peer_t {
	struct ol_txrx_vdev_t *vdev;

	/* UMAC peer objmgr handle */
	struct cdp_ctrl_objmgr_peer *ctrl_peer;

	qdf_atomic_t ref_cnt;
	qdf_atomic_t access_list[PEER_DEBUG_ID_MAX];
	qdf_atomic_t delete_in_progress;
	qdf_atomic_t flush_in_progress;

	/* The peer state tracking is used for HL systems
	 * that don't support tx and rx filtering within the target.
	 * In such systems, the peer's state determines what kind of
	 * tx and rx filtering, if any, is done.
	 * This variable doesn't apply to LL systems, or to HL systems for
	 * which the target handles tx and rx filtering. However, it is
	 * simplest to declare and update this variable unconditionally,
	 * for all systems.
	 */
	enum ol_txrx_peer_state state;
	qdf_spinlock_t peer_info_lock;

	/* Wrapper around the cached_bufq list */
	struct ol_txrx_cached_bufq_t bufq_info;

	ol_tx_filter_func tx_filter;

	/* peer ID(s) for this peer */
	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
	uint16_t local_id;
#endif

	union ol_txrx_align_mac_addr_t mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) hash_list_elem;

	/*
	 * per TID info -
	 * stored in separate arrays to avoid alignment padding mem overhead
	 */
	struct ol_rx_reorder_t tids_rx_reorder[OL_TXRX_NUM_EXT_TIDS];
	union htt_rx_pn_t tids_last_pn[OL_TXRX_NUM_EXT_TIDS];
	uint8_t tids_last_pn_valid[OL_TXRX_NUM_EXT_TIDS];
	uint8_t tids_rekey_flag[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_next_rel_idx[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_last_seq[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_mcast_last_seq[OL_TXRX_NUM_EXT_TIDS];

	struct {
		enum htt_sec_type sec_type;
		uint32_t michael_key[2];	/* relevant for TKIP */
	} security[2];	/* 0 -> multicast, 1 -> unicast */

	/*
	 * rx proc function: this either is a copy of pdev's rx_opt_proc for
	 * regular rx processing, or has been redirected to a /dev/null discard
	 * function when peer deletion is in progress.
	 */
	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned int tid, qdf_nbuf_t msdu_list);

#if defined(CONFIG_HL_SUPPORT)
	struct ol_tx_frms_queue_t txqs[OL_TX_NUM_TIDS];
#endif

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
	ol_txrx_peer_stats_t stats;
#endif
	int16_t rssi_dbm;

	/* NAWDS Flag and Bss Peer bit */
	uint16_t nawds_enabled:1, bss_peer:1, valid:1;

	/* QoS info */
	uint8_t qos_capable;
	/* U-APSD tid mask */
	uint8_t uapsd_mask;
	/* flag indicating key installed */
	uint8_t keyinstalled;

	/* Bit to indicate if PN check is done in fw */
	qdf_atomic_t fw_pn_check;

#ifdef WLAN_FEATURE_11W
	/* PN counter for Robust Management Frames */
	uint64_t last_rmf_pn;
	uint32_t rmf_pn_replays;
	uint8_t last_rmf_pn_valid;
#endif

	/* Properties of the last received PPDU */
	int16_t last_pkt_rssi_cmb;
	int16_t last_pkt_rssi[4];
	uint8_t last_pkt_legacy_rate;
	uint8_t last_pkt_legacy_rate_sel;
	uint32_t last_pkt_timestamp_microsec;
	uint8_t last_pkt_timestamp_submicrosec;
	uint32_t last_pkt_tsf;
	uint8_t last_pkt_tid;
	uint16_t last_pkt_center_freq;
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
	u_int16_t tx_limit;
	u_int16_t tx_limit_flag;
	u_int16_t tx_pause_flag;
#endif
	qdf_time_t last_assoc_rcvd;
	qdf_time_t last_disassoc_rcvd;
	qdf_time_t last_deauth_rcvd;
	qdf_atomic_t fw_create_pending;
	qdf_timer_t peer_unmap_timer;
};

struct ol_rx_remote_data {
	qdf_nbuf_t msdu;
	uint8_t mac_id;
};

struct ol_fw_data {
	void *data;
	uint32_t len;
};

#define INVALID_REORDER_INDEX 0xFFFF

#define SPS_DESC_SIZE 8

#endif /* _OL_TXRX_TYPES__H_ */