/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file ol_txrx_types.h
 * @brief Define the major data types used internally by the host datapath SW.
 */
#ifndef _OL_TXRX_TYPES__H_
#define _OL_TXRX_TYPES__H_

#include <qdf_nbuf.h>		/* qdf_nbuf_t */
#include <qdf_mem.h>
#include <cds_queue.h>		/* TAILQ */
#include <a_types.h>		/* A_UINT8 */
#include <htt.h>		/* htt_sec_type, htt_pkt_type, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_t */
#include <wdi_event_api.h>	/* wdi_event_subscribe */
#include <qdf_timer.h>		/* qdf_timer_t */
#include <qdf_lock.h>		/* qdf_spinlock */
#include <pktlog.h>		/* ol_pktlog_dev_handle */
#include <ol_txrx_stats.h>
#include <txrx.h>
#include "ol_txrx_htt_api.h"
#include "ol_htt_tx_api.h"
#include "ol_htt_rx_api.h"
#include "ol_txrx_ctrl_api.h"	/* WLAN_MAX_STA_COUNT */
#include "ol_txrx_osif_api.h"	/* ol_rx_callback */
#include "cdp_txrx_flow_ctrl_v2.h"
#include "cdp_txrx_peer_ops.h"
#include <qdf_trace.h>

/*
 * The target may allocate multiple IDs for a peer.
 * In particular, the target may allocate one ID to represent the
 * multicast key the peer uses, and another ID to represent the
 * unicast key the peer uses.
 */
#define MAX_NUM_PEER_ID_PER_PEER 16

/* OL_TXRX_NUM_EXT_TIDS -
 * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
 */
#define OL_TXRX_NUM_EXT_TIDS 19

#define OL_TX_NUM_QOS_TIDS 16	/* 16 regular TIDs */
#define OL_TX_NON_QOS_TID 16
#define OL_TX_MGMT_TID    17
#define OL_TX_NUM_TIDS    18
#define OL_RX_MCAST_TID   18	/* Mcast TID only between f/w & host */

#define OL_TX_VDEV_MCAST_BCAST    0 /* HTT_TX_EXT_TID_MCAST_BCAST */
#define OL_TX_VDEV_DEFAULT_MGMT   1 /* HTT_TX_EXT_TID_DEFALT_MGMT */
#define OL_TX_VDEV_NUM_QUEUES     2

#define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
#define OL_TXRX_MGMT_NUM_TYPES 8

#define OL_TX_MUTEX_TYPE qdf_spinlock_t
#define OL_RX_MUTEX_TYPE qdf_spinlock_t

/* TXRX Histogram defines */
#define TXRX_DATA_HISTROGRAM_GRANULARITY      1000
#define TXRX_DATA_HISTROGRAM_NUM_INTERVALS    100

#define OL_TXRX_INVALID_VDEV_ID		(-1)

struct ol_txrx_pdev_t;
struct ol_txrx_vdev_t;
struct ol_txrx_peer_t;

/* rx filter related */
#define MAX_PRIVACY_FILTERS           4 /* max privacy filters */

enum privacy_filter {
	PRIVACY_FILTER_ALWAYS,
	PRIVACY_FILTER_KEY_UNAVAILABLE,
};

enum privacy_filter_packet_type {
	PRIVACY_FILTER_PACKET_UNICAST,
	PRIVACY_FILTER_PACKET_MULTICAST,
	PRIVACY_FILTER_PACKET_BOTH
};

struct privacy_exemption {
	/* ethertype -
	 * type of ethernet frames this filter applies to, in host byte order
	 */
	uint16_t ether_type;
	enum privacy_filter filter_type;
	enum privacy_filter_packet_type packet_type;
};

enum ol_tx_frm_type {
	OL_TX_FRM_STD = 0, /* regular frame - no added header fragments */
	OL_TX_FRM_TSO,     /* TSO segment, with a modified IP header added */
	OL_TX_FRM_AUDIO,   /* audio frames, with a custom LLC/SNAP hdr added */
	OL_TX_FRM_NO_FREE, /* frame requires special tx completion callback */
	ol_tx_frm_freed = 0xff, /* the tx desc is in free list */
};

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)

#define MAX_NO_PEERS_IN_LIMIT (2*10 + 2)

enum ol_tx_peer_bal_state {
	ol_tx_peer_bal_enable = 0,
	ol_tx_peer_bal_disable,
};

enum ol_tx_peer_bal_timer_state {
	ol_tx_peer_bal_timer_disable = 0,
	ol_tx_peer_bal_timer_active,
	ol_tx_peer_bal_timer_inactive,
};

struct ol_tx_limit_peer_t {
	u_int16_t limit_flag;
	u_int16_t peer_id;
	u_int16_t limit;
};

enum tx_peer_level {
	TXRX_IEEE11_B = 0,
	TXRX_IEEE11_A_G,
	TXRX_IEEE11_N,
	TXRX_IEEE11_AC,
	TXRX_IEEE11_AX,
	TXRX_IEEE11_MAX,
};

struct tx_peer_threshold {
	u_int32_t tput_thresh;
	u_int32_t tx_limit;
};
#endif


struct ol_tx_desc_t {
	qdf_nbuf_t netbuf;
	void *htt_tx_desc;
	uint16_t id;
	qdf_dma_addr_t htt_tx_desc_paddr;
	void *htt_frag_desc; /* struct msdu_ext_desc_t * */
	qdf_dma_addr_t htt_frag_desc_paddr;
	qdf_atomic_t ref_cnt;
	enum htt_tx_status status;

#ifdef QCA_COMPUTE_TX_DELAY
	uint32_t entry_timestamp_ticks;
#endif

#ifdef DESC_TIMESTAMP_DEBUG_INFO
	struct {
		uint64_t prev_tx_ts;
		uint64_t curr_tx_ts;
		uint64_t last_comp_ts;
	} desc_debug_info;
#endif

	/*
	 * Allow tx descriptors to be stored in (doubly-linked) lists.
	 * This is mainly used for HL tx queuing and scheduling, but is
	 * also used by LL+HL for batch processing of tx frames.
	 */
	TAILQ_ENTRY(ol_tx_desc_t) tx_desc_list_elem;

	/*
	 * Remember whether the tx frame is a regular packet, or whether
	 * the driver added extra header fragments (e.g. a modified IP header
	 * for TSO fragments, or an added LLC/SNAP header for audio interworking
	 * data) that need to be handled in a special manner.
	 * This field is filled in with the ol_tx_frm_type enum.
	 */
	uint8_t pkt_type;

	u_int8_t vdev_id;

	struct ol_txrx_vdev_t *vdev;

	void *txq;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/*
	 * used by tx encap, to restore the os buf start offset
	 * after tx complete
	 */
	uint8_t orig_l2_hdr_bytes;
#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct ol_tx_flow_pool_t *pool;
#endif
	void *tso_desc;
	void *tso_num_desc;
};

typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;

union ol_tx_desc_list_elem_t {
	union ol_tx_desc_list_elem_t *next;
	struct ol_tx_desc_t tx_desc;
};

union ol_txrx_align_mac_addr_t {
	uint8_t raw[OL_TXRX_MAC_ADDR_LEN];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
};
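
/*
 * Illustrative sketch (not part of this header): the align2/align4 views of
 * union ol_txrx_align_mac_addr_t let a MAC address comparison be done with a
 * couple of aligned integer compares instead of a six-byte memcmp.  The
 * helper name below is hypothetical; it only uses the union members declared
 * above.
 *
 *	static inline int
 *	example_mac_addr_match(const union ol_txrx_align_mac_addr_t *a,
 *			       const union ol_txrx_align_mac_addr_t *b)
 *	{
 *		// non-zero if the two MAC addresses are identical
 *		return (a->align4.bytes_abcd == b->align4.bytes_abcd) &&
 *		       (a->align4.bytes_ef == b->align4.bytes_ef);
 *	}
 */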

struct ol_rx_reorder_timeout_list_elem_t {
	TAILQ_ENTRY(ol_rx_reorder_timeout_list_elem_t)
	reorder_timeout_list_elem;
	uint32_t timestamp_ms;
	struct ol_txrx_peer_t *peer;
	uint8_t tid;
	uint8_t active;
};

#define TXRX_TID_TO_WMM_AC(_tid) ( \
		(((_tid) >> 1) == 3) ? TXRX_WMM_AC_VO :	\
		(((_tid) >> 1) == 2) ? TXRX_WMM_AC_VI :	\
		(((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
		TXRX_WMM_AC_BE)
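
/*
 * Worked example (illustrative only): TXRX_TID_TO_WMM_AC implements the
 * standard 802.1D UP -> WMM access category mapping without a lookup table:
 *
 *	TXRX_TID_TO_WMM_AC(0) == TXRX_WMM_AC_BE   (best effort)
 *	TXRX_TID_TO_WMM_AC(1) == TXRX_WMM_AC_BK   (background)
 *	TXRX_TID_TO_WMM_AC(2) == TXRX_WMM_AC_BK
 *	TXRX_TID_TO_WMM_AC(3) == TXRX_WMM_AC_BE
 *	TXRX_TID_TO_WMM_AC(4) == TXRX_WMM_AC_VI   (video)
 *	TXRX_TID_TO_WMM_AC(5) == TXRX_WMM_AC_VI
 *	TXRX_TID_TO_WMM_AC(6) == TXRX_WMM_AC_VO   (voice)
 *	TXRX_TID_TO_WMM_AC(7) == TXRX_WMM_AC_VO
 *
 * TIDs 6 and 7 satisfy ((tid) >> 1) == 3, TIDs 4 and 5 satisfy
 * ((tid) >> 1) == 2, and ((tid ^ (tid >> 1)) & 0x1) is set only for
 * TIDs 1 and 2.
 */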

enum {
	OL_TX_SCHED_WRR_ADV_CAT_BE,
	OL_TX_SCHED_WRR_ADV_CAT_BK,
	OL_TX_SCHED_WRR_ADV_CAT_VI,
	OL_TX_SCHED_WRR_ADV_CAT_VO,
	OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA,
	OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT,
	OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA,
	OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT,

	OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES /* must be last */
};

A_COMPILE_TIME_ASSERT(ol_tx_sched_htt_ac_values,
	/* check that regular WMM AC enum values match */
	((int)OL_TX_SCHED_WRR_ADV_CAT_VO == (int)HTT_AC_WMM_VO) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_VI == (int)HTT_AC_WMM_VI) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_BK == (int)HTT_AC_WMM_BK) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_BE == (int)HTT_AC_WMM_BE) &&

	/* check that extension AC enum values match */
	((int)OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA
		== (int)HTT_AC_EXT_NON_QOS) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT
		== (int)HTT_AC_EXT_UCAST_MGMT) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA
		== (int)HTT_AC_EXT_MCAST_DATA) &&
	((int)OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT
		== (int)HTT_AC_EXT_MCAST_MGMT));

struct ol_tx_reorder_cat_timeout_t {
	TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
	qdf_timer_t timer;
	uint32_t duration_ms;
	struct ol_txrx_pdev_t *pdev;
};

enum ol_tx_scheduler_status {
	ol_tx_scheduler_idle = 0,
	ol_tx_scheduler_running,
};

enum ol_tx_queue_status {
	ol_tx_queue_empty = 0,
	ol_tx_queue_active,
	ol_tx_queue_paused,
};

struct ol_txrx_msdu_info_t {
	struct htt_msdu_info_t htt;
	struct ol_txrx_peer_t *peer;
	struct qdf_tso_info_t tso_info;
};

enum {
	ol_tx_aggr_untried = 0,
	ol_tx_aggr_enabled,
	ol_tx_aggr_disabled,
	ol_tx_aggr_retry,
	ol_tx_aggr_in_progress,
};

#define OL_TX_MAX_GROUPS_PER_QUEUE 1
#define OL_TX_MAX_VDEV_ID 16
#define OL_TXQ_GROUP_VDEV_ID_MASK_GET(_membership)	\
	(((_membership) & 0xffff0000) >> 16)
#define OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(_mask, _vdev_id)	\
	((_mask >> _vdev_id) & 0x01)
#define OL_TXQ_GROUP_AC_MASK_GET(_membership)	\
	((_membership) & 0x0000ffff)
#define OL_TXQ_GROUP_AC_BIT_MASK_GET(_mask, _ac_mask)	\
	((_mask >> _ac_mask) & 0x01)
#define OL_TXQ_GROUP_MEMBERSHIP_GET(_vdev_mask, _ac_mask)	\
	((_vdev_mask << 16) | _ac_mask)
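
/*
 * Worked example (illustrative only, values made up): a tx queue group
 * membership word packs a vdev bitmap in the upper 16 bits and an
 * access-category bitmap in the lower 16 bits.
 *
 *	u_int32_t membership = OL_TXQ_GROUP_MEMBERSHIP_GET(0x0005, 0x000f);
 *	// membership == 0x0005000f: vdevs 0 and 2, all four WMM ACs
 *
 *	u_int16_t vdev_mask = OL_TXQ_GROUP_VDEV_ID_MASK_GET(membership);
 *	u_int16_t ac_mask = OL_TXQ_GROUP_AC_MASK_GET(membership);
 *	// vdev_mask == 0x0005, ac_mask == 0x000f
 *
 *	int vdev2_is_member = OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_mask, 2);
 *	// vdev2_is_member == 1
 */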

struct ol_tx_frms_queue_t {
	/* list_elem -
	 * Allow individual tx frame queues to be linked together into
	 * scheduler queues of tx frame queues
	 */
	TAILQ_ENTRY(ol_tx_frms_queue_t) list_elem;
	uint8_t aggr_state;
	struct {
		uint8_t total;
		/* pause requested by ctrl SW rather than txrx SW */
		uint8_t by_ctrl;
	} paused_count;
	uint8_t ext_tid;
	uint16_t frms;
	uint32_t bytes;
	ol_tx_desc_list head;
	enum ol_tx_queue_status flag;
	struct ol_tx_queue_group_t *group_ptrs[OL_TX_MAX_GROUPS_PER_QUEUE];
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
	struct ol_txrx_peer_t *peer;
#endif
};

enum {
	ol_tx_log_entry_type_invalid,
	ol_tx_log_entry_type_queue_state,
	ol_tx_log_entry_type_enqueue,
	ol_tx_log_entry_type_dequeue,
	ol_tx_log_entry_type_drop,
	ol_tx_log_entry_type_queue_free,

	ol_tx_log_entry_type_wrap,
};

struct ol_tx_log_queue_state_var_sz_t {
	uint32_t active_bitmap;
	uint16_t credit;
	uint8_t num_cats_active;
	uint8_t data[1];
};

struct ol_tx_log_queue_add_t {
	uint8_t num_frms;
	uint8_t tid;
	uint16_t peer_id;
	uint16_t num_bytes;
};

struct ol_mac_addr {
	uint8_t mac_addr[OL_TXRX_MAC_ADDR_LEN];
};

struct ol_tx_sched_t;

#ifndef ol_txrx_local_peer_id_t
#define ol_txrx_local_peer_id_t uint8_t	/* default */
#endif

#ifdef QCA_COMPUTE_TX_DELAY
/*
 * Delay histogram bins: 16 bins of 10 ms each to count delays
 * from 0-160 ms, plus one overflow bin for delays > 160 ms.
 */
#define QCA_TX_DELAY_HIST_INTERNAL_BINS 17
#define QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS 10
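
/*
 * Illustrative sketch (not part of this header): mapping a measured delay to
 * one of the histogram bins described above.  The helper name is
 * hypothetical; the bin layout (16 x 10 ms bins plus one overflow bin)
 * follows directly from the two defines.
 *
 *	static inline int example_tx_delay_ms_to_bin(uint32_t delay_ms)
 *	{
 *		int bin = delay_ms / QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS;
 *
 *		// delays of 160 ms or more all land in the last (overflow) bin
 *		if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
 *			bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;
 *		return bin;
 *	}
 *
 * For example, a 35 ms delay falls in bin 3 (30-39 ms) and a 500 ms delay
 * falls in the overflow bin (16).
 */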

struct ol_tx_delay_data {
	struct {
		uint64_t transmit_sum_ticks;
		uint64_t queue_sum_ticks;
		uint32_t transmit_num;
		uint32_t queue_num;
	} avgs;
	uint16_t hist_bins_queue[QCA_TX_DELAY_HIST_INTERNAL_BINS];
};

#endif /* QCA_COMPUTE_TX_DELAY */

/* Thermal Mitigation */
enum throttle_phase {
	THROTTLE_PHASE_OFF,
	THROTTLE_PHASE_ON,
	/* Invalid */
	THROTTLE_PHASE_MAX,
};

#define THROTTLE_TX_THRESHOLD (100)

/*
 * Thresholds to stop/start the priority queue, expressed as a percentage of
 * the actual flow start and stop thresholds.  When the number of available
 * descriptors falls below stop_priority_th, the priority queue is paused.
 * When the number of available descriptors rises above start_priority_th,
 * the priority queue is un-paused.
 */
#define TX_PRIORITY_TH   (80)
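
/*
 * Worked example (illustrative only, values made up): with a flow pool whose
 * regular stop/start thresholds are stop_th = 20 and start_th = 40
 * descriptors, priority thresholds derived with TX_PRIORITY_TH would be
 *
 *	stop_priority_th  = (TX_PRIORITY_TH * 20) / 100 = 16
 *	start_priority_th = (TX_PRIORITY_TH * 40) / 100 = 32
 *
 * so priority traffic keeps flowing a little longer than regular traffic
 * when descriptors run low, and resumes sooner as they are freed.
 */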

/*
 * Maximum number of descriptors that can be used by a TSO jumbo packet with
 * 64K aggregation.
 */
#define MAX_TSO_SEGMENT_DESC (44)

struct ol_tx_queue_group_t {
	qdf_atomic_t credit;
	u_int32_t membership;
	int frm_count;
};
#define OL_TX_MAX_TXQ_GROUPS 2

#define OL_TX_GROUP_STATS_LOG_SIZE 128
struct ol_tx_group_credit_stats_t {
	struct {
		struct {
			u_int16_t member_vdevs;
			u_int16_t credit;
		} grp[OL_TX_MAX_TXQ_GROUPS];
	} stats[OL_TX_GROUP_STATS_LOG_SIZE];
	u_int16_t last_valid_index;
	u_int16_t wrap_around;
};

#ifdef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED: pool is active (can take/put descriptors)
 *			       and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *			     and network queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 * @FLOW_POOL_NON_PRIO_PAUSED: non-priority queues are paused
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_NON_PRIO_PAUSED = 2,
	FLOW_POOL_INVALID = 3,
	FLOW_POOL_INACTIVE = 4
};

/**
 * struct ol_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pool_resize_count: flow pool resize command received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 */
struct ol_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pool_resize_count;
	uint16_t pkt_drop_no_pool;
};

/**
 * struct ol_tx_flow_pool_t - flow_pool info
 * @flow_pool_list_elem: flow_pool_list element
 * @flow_pool_lock: flow_pool lock
 * @flow_pool_id: flow_pool id
 * @flow_pool_size: flow_pool size
 * @avail_desc: available descriptors
 * @deficient_desc: deficient descriptors
 * @overflow_desc: overflow descriptors
 * @status: flow pool status
 * @flow_type: flow pool type
 * @member_flow_id: member flow id
 * @stop_th: stop threshold
 * @start_th: start threshold
 * @freelist: tx descriptor freelist
 * @pkt_drop_no_desc: drop due to no descriptors
 * @ref_cnt: pool's ref count
 * @stop_priority_th: Threshold to stop priority queue
 * @start_priority_th: Threshold to start priority queue
 */
struct ol_tx_flow_pool_t {
	TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
	qdf_spinlock_t flow_pool_lock;
	uint8_t flow_pool_id;
	uint16_t flow_pool_size;
	uint16_t avail_desc;
	uint16_t deficient_desc;
	uint16_t overflow_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
	uint8_t member_flow_id;
	uint16_t stop_th;
	uint16_t start_th;
	union ol_tx_desc_list_elem_t *freelist;
	uint16_t pkt_drop_no_desc;
	qdf_atomic_t ref_cnt;
	uint16_t stop_priority_th;
	uint16_t start_priority_th;
};
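
/*
 * Illustrative sketch (not part of this header): how the stop_th/start_th
 * hysteresis on a flow pool is typically applied.  The function name and the
 * exact flow are simplified assumptions, not the driver's implementation;
 * only the struct fields come from the definition above.
 *
 *	static void example_flow_pool_update(struct ol_tx_flow_pool_t *pool)
 *	{
 *		qdf_spin_lock_bh(&pool->flow_pool_lock);
 *		if (pool->status == FLOW_POOL_ACTIVE_UNPAUSED &&
 *		    pool->avail_desc < pool->stop_th) {
 *			// too few descriptors left: pause the netdev queues
 *			pool->status = FLOW_POOL_ACTIVE_PAUSED;
 *		} else if (pool->status == FLOW_POOL_ACTIVE_PAUSED &&
 *			   pool->avail_desc > pool->start_th) {
 *			// enough descriptors freed up again: unpause
 *			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
 *		}
 *		qdf_spin_unlock_bh(&pool->flow_pool_lock);
 *	}
 */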

#endif

/*
 * struct ol_txrx_peer_id_map - Map of firmware peer_ids to peers on host
 * @peer: Pointer to peer object
 * @peer_id_ref_cnt: No. of firmware references to the peer_id
 * @del_peer_id_ref_cnt: No. of outstanding unmap events for peer_id
 *			 after the peer object is deleted on the host.
 *
 * peer_id is used as an index into the array of ol_txrx_peer_id_map.
 */
struct ol_txrx_peer_id_map {
	struct ol_txrx_peer_t *peer;
	qdf_atomic_t peer_id_ref_cnt;
	qdf_atomic_t del_peer_id_ref_cnt;
};

/**
 * ol_txrx_stats_req_internal - specifications of the requested
 * statistics internally
 */
struct ol_txrx_stats_req_internal {
	struct ol_txrx_stats_req base;
	TAILQ_ENTRY(ol_txrx_stats_req_internal) req_list_elem;
	int serviced; /* state of this request */
	int offset;
};

struct ol_txrx_fw_stats_desc_t {
	struct ol_txrx_stats_req_internal *req;
	unsigned char desc_id;
};

struct ol_txrx_fw_stats_desc_elem_t {
	struct ol_txrx_fw_stats_desc_elem_t *next;
	struct ol_txrx_fw_stats_desc_t desc;
};

/*
 * As depicted in the diagram below, the pdev contains an array of
 * NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
 * Each element identifies all the tx queues that are active for
 * the TID, from the different peers.
 *
 * Each peer contains an array of NUM_EXT_TID ol_tx_frms_queue_t elements.
 * Each element identifies the tx frames for the TID that need to be sent
 * to the peer.
 *
 *
 * pdev: ol_tx_active_queues_in_tid_t active_in_tids[NUM_EXT_TIDS]
 *                                TID
 *       0            1            2                     17
 *  +============+============+============+==    ==+============+
 *  | active (y) | active (n) | active (n) |        | active (y) |
 *  |------------+------------+------------+--    --+------------|
 *  | queues     | queues     | queues     |        | queues     |
 *  +============+============+============+==    ==+============+
 *       |                                               |
 *    .--+-----------------------------------------------'
 *    |  |
 *    |  |     peer X:                            peer Y:
 *    |  |     ol_tx_frms_queue_t                 ol_tx_frms_queue_t
 *    |  |     tx_queues[NUM_EXT_TIDS]            tx_queues[NUM_EXT_TIDS]
 *    |  | TID +======+                       TID +======+
 *    |  `---->| next |-------------------------->| next |--X
 *    |     0  | prev |   .------.   .------.  0  | prev |   .------.
 *    |        | txq  |-->|txdesc|-->|txdesc|     | txq  |-->|txdesc|
 *    |        +======+   `------'   `------'     +======+   `------'
 *    |        | next |      |          |      1  | next |      |
 *    |     1  | prev |      v          v         | prev |      v
 *    |        | txq  |   .------.   .------.     | txq  |   .------.
 *    |        +======+   |netbuf|   |netbuf|     +======+   |netbuf|
 *    |        | next |   `------'   `------'     | next |   `------'
 *    |     2  | prev |                        2  | prev |
 *    |        | txq  |                           | txq  |
 *    |        +======+                           +======+
 *    |        |  |  |                            |  |  |
 *    |
 *    |
 *    |        |  |  |                            |  |  |
 *    |        +======+                           +======+
 *    `------->| next |--X                        | next |
 *          17 | prev |   .------.             17 | prev |
 *             | txq  |-->|txdesc|                | txq  |
 *             +======+   `------'                +======+
 *                           |
 *                           v
 *                        .------.
 *                        |netbuf|
 *                        `------'
 */
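/*
 * Illustrative sketch (not part of this header): walking one per-TID tx frame
 * queue as shown in the diagram above.  ol_tx_frms_queue_t::head is an
 * ol_tx_desc_list (a TAILQ of ol_tx_desc_t), so iteration uses the
 * tx_desc_list_elem linkage; the helper name is hypothetical.
 *
 *	static uint32_t example_txq_count_bytes(struct ol_tx_frms_queue_t *txq)
 *	{
 *		struct ol_tx_desc_t *tx_desc;
 *		uint32_t bytes = 0;
 *
 *		TAILQ_FOREACH(tx_desc, &txq->head, tx_desc_list_elem) {
 *			// each descriptor points at the netbuf it will send
 *			bytes += qdf_nbuf_len(tx_desc->netbuf);
 *		}
 *		return bytes;
 *	}
 */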
struct ol_txrx_pdev_t {
	/* ctrl_pdev - handle for querying config info */
	struct cdp_cfg *ctrl_pdev;

	/* osdev - handle for mem alloc / free, map / unmap */
	qdf_device_t osdev;

	htt_pdev_handle htt_pdev;

#ifdef WLAN_FEATURE_FASTPATH
	struct CE_handle *ce_tx_hdl; /* Handle to Tx packet posting CE */
	struct CE_handle *ce_htt_msg_hdl; /* Handle to TxRx completion CE */
#endif /* WLAN_FEATURE_FASTPATH */

	struct {
		int is_high_latency;
		int host_addba;
		int ll_pause_txq_limit;
		int default_tx_comp_req;
		u8 credit_update_enabled;
		u8 request_tx_comp;
	} cfg;

	/* WDI subscriber's event list */
	wdi_event_subscribe **wdi_event_list;

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
	bool pkt_log_init;
	/* Pktlog pdev */
	struct pktlog_dev_t *pl_dev;
#endif /* #ifndef REMOVE_PKT_LOG */

	enum ol_sec_type sec_types[htt_num_sec_types];
	/* standard frame type */
	enum wlan_frm_fmt frame_format;
	enum htt_pkt_type htt_pkt_type;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* txrx encap/decap */
	uint8_t sw_tx_encap;
	uint8_t sw_rx_decap;
	uint8_t target_tx_tran_caps;
	uint8_t target_rx_tran_caps;
	/* llc process */
	uint8_t sw_tx_llc_proc_enable;
	uint8_t sw_rx_llc_proc_enable;
	/* A-MSDU */
	uint8_t sw_subfrm_hdr_recovery_enable;
	/* Protected Frame bit handling */
	uint8_t sw_pf_proc_enable;
#endif
	/*
	 * target tx credit -
	 * not needed for LL, but used for HL download scheduler to keep
	 * track of roughly how much space is available in the target for
	 * tx frames
	 */
	qdf_atomic_t target_tx_credit;
	qdf_atomic_t orig_target_tx_credit;

	struct {
		uint16_t pool_size;
		struct ol_txrx_fw_stats_desc_elem_t *pool;
		struct ol_txrx_fw_stats_desc_elem_t *freelist;
		qdf_spinlock_t pool_lock;
		qdf_atomic_t initialized;
	} ol_txrx_fw_stats_desc_pool;

	/* Peer mac address to staid mapping */
	struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];

	/* ol_txrx_vdev list */
	TAILQ_HEAD(, ol_txrx_vdev_t) vdev_list;

	TAILQ_HEAD(, ol_txrx_stats_req_internal) req_list;
	int req_list_depth;
	qdf_spinlock_t req_list_spinlock;

	/* peer ID to peer object map (array of pointers to peer objects) */
	struct ol_txrx_peer_id_map *peer_id_to_obj_map;

	struct {
		unsigned int mask;
		unsigned int idx_bits;

		TAILQ_HEAD(, ol_txrx_peer_t) * bins;
	} peer_hash;

	/* rx specific processing */
	struct {
		struct {
			TAILQ_HEAD(, ol_rx_reorder_t) waitlist;
			uint32_t timeout_ms;
		} defrag;
		struct {
			int defrag_timeout_check;
			int dup_check;
		} flags;

		struct {
			struct ol_tx_reorder_cat_timeout_t
				access_cats[TXRX_NUM_WMM_AC];
		} reorder_timeout;
		qdf_spinlock_t mutex;
	} rx;

	/* rx proc function */
	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned int tid, qdf_nbuf_t msdu_list);

	/* tx data delivery notification callback function */
	struct {
		ol_txrx_data_tx_cb func;
		void *ctxt;
	} tx_data_callback;

	/* tx management delivery notification callback functions */
	struct {
		ol_txrx_mgmt_tx_cb download_cb;
		ol_txrx_mgmt_tx_cb ota_ack_cb;
		void *ctxt;
	} tx_mgmt_cb;

	data_stall_detect_cb data_stall_detect_callback;
	/* packetdump callback functions */
	tp_ol_packetdump_cb ol_tx_packetdump_cb;
	tp_ol_packetdump_cb ol_rx_packetdump_cb;

#ifdef WLAN_FEATURE_TSF_PLUS
	tp_ol_timestamp_cb ol_tx_timestamp_cb;
#endif

	struct {
		uint16_t pool_size;
		uint16_t num_free;
		union ol_tx_desc_list_elem_t *array;
		union ol_tx_desc_list_elem_t *freelist;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
		uint8_t num_invalid_bin;
		qdf_spinlock_t flow_pool_list_lock;
		TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
#endif
		uint32_t page_size;
		uint16_t desc_reserved_size;
		uint8_t page_divider;
		uint32_t offset_filter;
		struct qdf_mem_multi_page_t desc_pages;
#ifdef DESC_DUP_DETECT_DEBUG
		unsigned long *free_list_bitmap;
#endif
	} tx_desc;

	uint8_t is_mgmt_over_wmi_enabled;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2)
	struct ol_txrx_pool_stats pool_stats;
	uint32_t num_msdu_desc;
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
	struct ol_tx_flow_pool_t *mgmt_pool;
#endif
#endif

	struct {
		int (*cmp)(union htt_rx_pn_t *new,
			   union htt_rx_pn_t *old,
			   int is_unicast, int opmode);
		int len;
	} rx_pn[htt_num_sec_types];

	/* tx mutex */
	OL_TX_MUTEX_TYPE tx_mutex;

	/*
	 * peer ref mutex:
	 * 1. Protect peer object lookups until the returned peer object's
	 *    reference count is incremented.
	 * 2. Provide mutex when accessing peer object lookup structures.
	 */
	OL_RX_MUTEX_TYPE peer_ref_mutex;

	/*
	 * last_real_peer_mutex:
	 * Protect lookups of any vdev's last_real_peer pointer until the
	 * reference count for the pointed-to peer object is incremented.
	 * This mutex could be in the vdev struct, but it's slightly simpler
	 * to have a single lock in the pdev struct.  Since the lock is only
	 * held for an extremely short time, and since it's very unlikely for
	 * two vdev's to concurrently access the lock, there's no real
	 * benefit to having a per-vdev lock.
	 */
	OL_RX_MUTEX_TYPE last_real_peer_mutex;

	qdf_spinlock_t peer_map_unmap_lock;

	struct {
		struct {
			struct {
				struct {
					uint64_t ppdus;
					uint64_t mpdus;
				} normal;
				struct {
					/*
					 * mpdu_bad is general -
					 * replace it with the specific counters
					 * below
					 */
					uint64_t mpdu_bad;
					/* uint64_t mpdu_fcs; */
					/* uint64_t mpdu_duplicate; */
					/* uint64_t mpdu_pn_replay; */
					/* uint64_t mpdu_bad_sender; */
					/* ^ comment: peer not found */
					/* uint64_t mpdu_flushed; */
					/* uint64_t msdu_defrag_mic_err; */
					uint64_t msdu_mc_dup_drop;
				} err;
			} rx;
		} priv;
		struct ol_txrx_stats pub;
	} stats;

#if defined(ENABLE_RX_REORDER_TRACE)
	struct {
		uint32_t mask;
		uint32_t idx;
		uint64_t cnt;
#define TXRX_RX_REORDER_TRACE_SIZE_LOG2 8	/* 256 entries */
		struct {
			uint16_t reorder_idx;
			uint16_t seq_num;
			uint8_t num_mpdus;
			uint8_t tid;
		} *data;
	} rx_reorder_trace;
#endif /* ENABLE_RX_REORDER_TRACE */

#if defined(ENABLE_RX_PN_TRACE)
	struct {
		uint32_t mask;
		uint32_t idx;
		uint64_t cnt;
#define TXRX_RX_PN_TRACE_SIZE_LOG2 5	/* 32 entries */
		struct {
			struct ol_txrx_peer_t *peer;
			uint32_t pn32;
			uint16_t seq_num;
			uint8_t unicast;
			uint8_t tid;
		} *data;
	} rx_pn_trace;
#endif /* ENABLE_RX_PN_TRACE */

#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
	bool host_80211_enable;
#endif

	/*
	 * tx_sched only applies for HL, but is defined unconditionally
	 * rather than only if defined(CONFIG_HL_SUPPORT).
	 * This is because the struct only
	 * occupies a few bytes, and to avoid the complexity of
	 * wrapping references
	 * to the struct members in "defined(CONFIG_HL_SUPPORT)" conditional
	 * compilation.
	 * If this struct gets expanded to a non-trivial size,
	 * then it should be
	 * conditionally compiled to only apply if defined(CONFIG_HL_SUPPORT).
	 */
	qdf_spinlock_t tx_queue_spinlock;
	struct {
		enum ol_tx_scheduler_status tx_sched_status;
		struct ol_tx_sched_t *scheduler;
		struct ol_tx_frms_queue_t *last_used_txq;
	} tx_sched;
	/*
	 * tx_queue only applies for HL, but is defined unconditionally to avoid
	 * wrapping references to tx_queue in "defined(CONFIG_HL_SUPPORT)"
	 * conditional compilation.
	 */
	struct {
		qdf_atomic_t rsrc_cnt;
		/* threshold_lo - when to start tx desc margin replenishment */
		uint16_t rsrc_threshold_lo;
		/*
		 * threshold_hi - where to stop during tx desc margin
		 * replenishment
		 */
		uint16_t rsrc_threshold_hi;
	} tx_queue;

#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
#define OL_TXQ_LOG_SIZE 512
	qdf_spinlock_t txq_log_spinlock;
	struct {
		int size;
		int oldest_record_offset;
		int offset;
		int allow_wrap;
		u_int32_t wrapped;
		/* aligned to u_int32_t boundary */
		u_int8_t data[OL_TXQ_LOG_SIZE];
	} txq_log;
#endif

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
	qdf_spinlock_t peer_stat_mutex;
#endif

	int rssi_update_shift;
	int rssi_new_weight;
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
	struct {
		ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		ol_txrx_local_peer_id_t freelist;
		qdf_spinlock_t lock;
		ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;
#endif

#ifdef QCA_COMPUTE_TX_DELAY
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
#define QCA_TX_DELAY_NUM_CATEGORIES \
	(OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
#else
#define QCA_TX_DELAY_NUM_CATEGORIES 1
#endif
	struct {
		qdf_spinlock_t mutex;
		struct {
			struct ol_tx_delay_data copies[2]; /* ping-pong */
			int in_progress_idx;
			uint32_t avg_start_time_ticks;
		} cats[QCA_TX_DELAY_NUM_CATEGORIES];
		uint32_t tx_compl_timestamp_ticks;
		uint32_t avg_period_ticks;
		uint32_t hist_internal_bin_width_mult;
		uint32_t hist_internal_bin_width_shift;
	} tx_delay;

	uint16_t packet_count[QCA_TX_DELAY_NUM_CATEGORIES];
	uint16_t packet_loss_count[QCA_TX_DELAY_NUM_CATEGORIES];

#endif /* QCA_COMPUTE_TX_DELAY */

	struct {
		qdf_spinlock_t mutex;
		/* timer used to monitor the throttle "on" phase and
		 * "off" phase
		 */
		qdf_timer_t phase_timer;
		/* timer used to send tx frames */
		qdf_timer_t tx_timer;
		/* This is the time in ms of the throttling window, it will
		 * include an "on" phase and an "off" phase
		 */
		uint32_t throttle_period_ms;
		/* Current throttle level set by the client ex. level 0,
		 * level 1, etc
		 */
		enum throttle_level current_throttle_level;
		/* Index that points to the phase within the throttle period */
		enum throttle_phase current_throttle_phase;
		/* Maximum number of frames to send to the target at one time */
		uint32_t tx_threshold;
		/* stores time in ms of on/off phase for each throttle level */
		int throttle_time_ms[THROTTLE_LEVEL_MAX][THROTTLE_PHASE_MAX];
		/* mark true if traffic is paused due to thermal throttling */
		bool is_paused;
	} tx_throttle;

#if defined(FEATURE_TSO)
	struct {
		uint16_t pool_size;
		uint16_t num_free;
		struct qdf_tso_seg_elem_t *freelist;
		/* tso mutex */
		OL_TX_MUTEX_TYPE tso_mutex;
	} tso_seg_pool;
	struct {
		uint16_t num_seg_pool_size;
		uint16_t num_free;
		struct qdf_tso_num_seg_elem_t *freelist;
		/* tso mutex */
		OL_TX_MUTEX_TYPE tso_num_seg_mutex;
	} tso_num_seg_pool;
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
	struct {
		enum ol_tx_peer_bal_state enabled;
		qdf_spinlock_t mutex;
		/* timer used to trigger more frames for bad peers */
		qdf_timer_t peer_bal_timer;
		/* the peer balance timer period, in ms */
		u_int32_t peer_bal_period_ms;
		/* the txq limit */
		u_int32_t peer_bal_txq_limit;
		/* the state of the peer balance timer */
		enum ol_tx_peer_bal_timer_state peer_bal_timer_state;
		/* count of active peers which are under tx flow control */
		u_int32_t peer_num;
		/* list of peers that are under tx flow control */
		struct ol_tx_limit_peer_t limit_list[MAX_NO_PEERS_IN_LIMIT];
		/* threshold configuration */
		struct tx_peer_threshold ctl_thresh[TXRX_IEEE11_MAX];
	} tx_peer_bal;
#endif /* CONFIG_HL_SUPPORT && QCA_BAD_PEER_TX_FLOW_CL */

	struct ol_tx_queue_group_t txq_grps[OL_TX_MAX_TXQ_GROUPS];
#if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
	bool limit_lend;
	u16 min_reserve;
#endif
#ifdef DEBUG_HL_LOGGING
	qdf_spinlock_t grp_stat_spinlock;
	struct ol_tx_group_credit_stats_t grp_stats;
#endif
	int tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES];
	uint8_t ocb_peer_valid;
	struct ol_txrx_peer_t *ocb_peer;
	tx_pause_callback pause_cb;

	void (*offld_flush_cb)(void *);
	struct ol_txrx_peer_t *self_peer;
	qdf_work_t peer_unmap_timer_work;

	/* dp debug fs */
	struct dentry *dpt_stats_log_dir;
	enum qdf_dpt_debugfs_state state;
	struct qdf_debugfs_fops dpt_debugfs_fops;

#ifdef IPA_OFFLOAD
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *usr_ctxt;
	struct ol_txrx_ipa_resources ipa_resource;
#endif /* IPA_UC_OFFLOAD */
};

struct ol_txrx_vdev_t {
	struct ol_txrx_pdev_t *pdev; /* pdev - the physical device that is
				      * the parent of this virtual device
				      */
	uint8_t vdev_id;	     /* ID used to specify a particular vdev
				      * to the target
				      */
	void *osif_dev;

	void *ctrl_vdev; /* vdev objmgr handle */

	union ol_txrx_align_mac_addr_t mac_addr; /* MAC address */
	/* tx paused - NO LONGER NEEDED? */
	TAILQ_ENTRY(ol_txrx_vdev_t) vdev_list_elem; /* node in the pdev's list
						     * of vdevs
						     */
	TAILQ_HEAD(peer_list_t, ol_txrx_peer_t) peer_list;
	struct ol_txrx_peer_t *last_real_peer; /* last real peer created for
						* this vdev (not "self"
						* pseudo-peer)
						*/
	ol_txrx_rx_fp rx; /* receive function used by this vdev */
	ol_txrx_stats_rx_fp stats_rx; /* receive function used by this vdev */

	struct {
		uint32_t txack_success;
		uint32_t txack_failed;
	} txrx_stats;

	/* completion function used by this vdev */
	ol_txrx_completion_fp tx_comp;

	struct {
		/*
		 * If the vdev object couldn't be deleted immediately because
		 * it still had some peer objects left, remember that a delete
		 * was requested, so it can be deleted once all its peers have
		 * been deleted.
		 */
		int pending;
		/*
		 * Store a function pointer and a context argument to provide a
		 * notification for when the vdev is deleted.
		 */
		ol_txrx_vdev_delete_cb callback;
		void *context;
		atomic_t detaching;
	} delete;

	/* safe mode control to bypass the encrypt and decipher process */
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
	struct privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t num_filters;

	enum wlan_op_mode opmode;

#ifdef QCA_IBSS_SUPPORT
	/* ibss mode related */
	int16_t ibss_peer_num;	/* the number of active peers */
	int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
#endif

#if defined(CONFIG_HL_SUPPORT)
	struct ol_tx_frms_queue_t txqs[OL_TX_VDEV_NUM_QUEUES];
#endif

	struct {
		struct {
			qdf_nbuf_t head;
			qdf_nbuf_t tail;
			int depth;
		} txq;
		uint32_t paused_reason;
		qdf_spinlock_t mutex;
		qdf_timer_t timer;
		int max_q_depth;
		bool is_q_paused;
		bool is_q_timer_on;
		uint32_t q_pause_cnt;
		uint32_t q_unpause_cnt;
		uint32_t q_overflow_cnt;
	} ll_pause;
	bool disable_intrabss_fwd;
	qdf_atomic_t os_q_paused;
	uint16_t tx_fl_lwm;
	uint16_t tx_fl_hwm;
	qdf_spinlock_t flow_control_lock;
	ol_txrx_tx_flow_control_fp osif_flow_control_cb;
	ol_txrx_tx_flow_control_is_pause_fp osif_flow_control_is_pause;
	void *osif_fc_ctx;

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
	union ol_txrx_align_mac_addr_t hl_tdls_ap_mac_addr;
	bool hlTdlsFlag;
#endif

#if defined(QCA_HL_NETDEV_FLOW_CONTROL)
	qdf_atomic_t tx_desc_count;
	int tx_desc_limit;
	int queue_restart_th;
	int queue_stop_th;
	int prio_q_paused;
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */

	uint16_t wait_on_peer_id;
	union ol_txrx_align_mac_addr_t last_peer_mac_addr;
	qdf_event_t wait_delete_comp;
#if defined(FEATURE_TSO)
	struct {
		int pool_elems; /* total number of elements in the pool */
		int alloc_cnt; /* number of allocated elements */
		uint32_t *freelist; /* free list of qdf_tso_seg_elem_t */
	} tso_pool_t;
#endif

	/* last channel change event received */
	struct {
		bool is_valid;	/* whether the rest of the members are valid */
		uint16_t mhz;
		uint16_t band_center_freq1;
		uint16_t band_center_freq2;
		WLAN_PHY_MODE phy_mode;
	} ocb_channel_event;

	/* Information about the schedules in the schedule */
	struct ol_txrx_ocb_chan_info *ocb_channel_info;
	uint32_t ocb_channel_count;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct ol_tx_flow_pool_t *pool;
#endif
	/* intra bss forwarded tx and rx packets count */
	uint64_t fwd_tx_packets;
	uint64_t fwd_rx_packets;
	bool is_wisa_mode_enable;
	uint8_t mac_id;
};

struct ol_rx_reorder_array_elem_t {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};

struct ol_rx_reorder_t {
	uint8_t win_sz;
	uint8_t win_sz_mask;
	uint8_t num_mpdus;
	struct ol_rx_reorder_array_elem_t *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct ol_rx_reorder_array_elem_t base;
#if defined(QCA_SUPPORT_OL_RX_REORDER_TIMEOUT)
	struct ol_rx_reorder_timeout_list_elem_t timeout;
#endif
	/* only used for defrag right now */
	TAILQ_ENTRY(ol_rx_reorder_t) defrag_waitlist_elem;
	uint32_t defrag_timeout_ms;
	/* get back to parent ol_txrx_peer_t when ol_rx_reorder_t is in a
	 * waitlist
	 */
	uint16_t tid;
};
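
/*
 * Illustrative sketch (not part of this header): a block-ack reorder window
 * of win_sz entries is typically indexed by masking the MPDU sequence number
 * with win_sz_mask.  The helper name is hypothetical and it ignores the
 * non-aggregation case that uses the single "base" element.
 *
 *	static struct ol_rx_reorder_array_elem_t *
 *	example_reorder_slot(struct ol_rx_reorder_t *rx_reorder,
 *			     uint16_t seq_num)
 *	{
 *		// e.g. win_sz 64 -> win_sz_mask 0x3f, so sequence numbers
 *		// wrap onto the 64 available slots
 *		return &rx_reorder->array[seq_num & rx_reorder->win_sz_mask];
 *	}
 */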

enum {
	txrx_sec_mcast = 0,
	txrx_sec_ucast
};

typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
				      tx_msdu_info);

#define OL_TXRX_PEER_SECURITY_MULTICAST  0
#define OL_TXRX_PEER_SECURITY_UNICAST    1
#define OL_TXRX_PEER_SECURITY_MAX        2


/* Allow 6000 ms to receive peer unmap events after peer is deleted */
#define OL_TXRX_PEER_UNMAP_TIMEOUT (6000)

struct ol_txrx_cached_bufq_t {
	/* cached_bufq is used to enqueue the pending RX frames from a peer
	 * before the peer is registered for data service. The list will be
	 * flushed to HDD once that station is registered.
	 */
	struct list_head cached_bufq;
	/* mutual exclusion lock to access the cached_bufq queue */
	qdf_spinlock_t bufq_lock;
	/* # entries in queue after which subsequent adds will be dropped */
	uint32_t thresh;
	/* # entries present in cached_bufq */
	uint32_t curr;
	/* # max num of entries in the queue if bufq thresh was not in place */
	uint32_t high_water_mark;
	/* # max num of entries in the queue if we did not drop packets */
	uint32_t qdepth_no_thresh;
	/* # of packets (beyond threshold) dropped from cached_bufq */
	uint32_t dropped;
};
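
/*
 * Illustrative sketch (not part of this header): enqueueing an rx frame into
 * the cached buffer queue with the threshold bookkeeping described above.
 * The helper and the cached-buffer node type are hypothetical; only the
 * ol_txrx_cached_bufq_t fields come from this header, and the
 * high_water_mark/qdepth_no_thresh debug counters are omitted.
 *
 *	struct example_cached_buf {
 *		struct list_head list;
 *		qdf_nbuf_t buf;
 *	};
 *
 *	static int example_bufq_add(struct ol_txrx_cached_bufq_t *bufq_info,
 *				    struct example_cached_buf *node)
 *	{
 *		int rc = 0;
 *
 *		qdf_spin_lock_bh(&bufq_info->bufq_lock);
 *		if (bufq_info->curr >= bufq_info->thresh) {
 *			// beyond the threshold: count the drop and reject
 *			bufq_info->dropped++;
 *			rc = -ENOSPC;
 *		} else {
 *			list_add_tail(&node->list, &bufq_info->cached_bufq);
 *			bufq_info->curr++;
 *		}
 *		qdf_spin_unlock_bh(&bufq_info->bufq_lock);
 *		return rc;
 *	}
 */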

struct ol_txrx_peer_t {
	struct ol_txrx_vdev_t *vdev;

	/* UMAC peer objmgr handle */
	struct cdp_ctrl_objmgr_peer *ctrl_peer;

	qdf_atomic_t ref_cnt;
	qdf_atomic_t access_list[PEER_DEBUG_ID_MAX];
	qdf_atomic_t delete_in_progress;
	qdf_atomic_t flush_in_progress;

	/* The peer state tracking is used for HL systems
	 * that don't support tx and rx filtering within the target.
	 * In such systems, the peer's state determines what kind of
	 * tx and rx filtering, if any, is done.
	 * This variable doesn't apply to LL systems, or to HL systems for
	 * which the target handles tx and rx filtering. However, it is
	 * simplest to declare and update this variable unconditionally,
	 * for all systems.
	 */
	enum ol_txrx_peer_state state;
	qdf_spinlock_t peer_info_lock;

	/* Wrapper around the cached_bufq list */
	struct ol_txrx_cached_bufq_t bufq_info;

	ol_tx_filter_func tx_filter;

	/* peer ID(s) for this peer */
	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
	uint16_t local_id;
#endif

	union ol_txrx_align_mac_addr_t mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) hash_list_elem;

	/*
	 * per TID info -
	 * stored in separate arrays to avoid alignment padding mem overhead
	 */
	struct ol_rx_reorder_t tids_rx_reorder[OL_TXRX_NUM_EXT_TIDS];
	union htt_rx_pn_t tids_last_pn[OL_TXRX_NUM_EXT_TIDS];
	uint8_t tids_last_pn_valid[OL_TXRX_NUM_EXT_TIDS];
	uint8_t tids_rekey_flag[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_next_rel_idx[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_last_seq[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_mcast_last_seq[OL_TXRX_NUM_EXT_TIDS];

	struct {
		enum htt_sec_type sec_type;
		uint32_t michael_key[2];	/* relevant for TKIP */
	} security[2];	/* 0 -> multicast, 1 -> unicast */

	/*
	 * rx proc function: this either is a copy of pdev's rx_opt_proc for
	 * regular rx processing, or has been redirected to a /dev/null discard
	 * function when peer deletion is in progress.
	 */
	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned int tid, qdf_nbuf_t msdu_list);

#if defined(CONFIG_HL_SUPPORT)
	struct ol_tx_frms_queue_t txqs[OL_TX_NUM_TIDS];
#endif

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
	ol_txrx_peer_stats_t stats;
#endif
	int16_t rssi_dbm;

	/* NAWDS Flag and Bss Peer bit */
	uint16_t nawds_enabled:1, bss_peer:1, valid:1;

	/* QoS info */
	uint8_t qos_capable;
	/* U-APSD tid mask */
	uint8_t uapsd_mask;
	/* flag indicating key installed */
	uint8_t keyinstalled;

	/* Bit to indicate if PN check is done in fw */
	qdf_atomic_t fw_pn_check;

#ifdef WLAN_FEATURE_11W
	/* PN counter for Robust Management Frames */
	uint64_t last_rmf_pn;
	uint32_t rmf_pn_replays;
	uint8_t last_rmf_pn_valid;
#endif

	/* Properties of the last received PPDU */
	int16_t last_pkt_rssi_cmb;
	int16_t last_pkt_rssi[4];
	uint8_t last_pkt_legacy_rate;
	uint8_t last_pkt_legacy_rate_sel;
	uint32_t last_pkt_timestamp_microsec;
	uint8_t last_pkt_timestamp_submicrosec;
	uint32_t last_pkt_tsf;
	uint8_t last_pkt_tid;
	uint16_t last_pkt_center_freq;
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
	u_int16_t tx_limit;
	u_int16_t tx_limit_flag;
	u_int16_t tx_pause_flag;
#endif
	qdf_time_t last_assoc_rcvd;
	qdf_time_t last_disassoc_rcvd;
	qdf_time_t last_deauth_rcvd;
	qdf_atomic_t fw_create_pending;
	qdf_timer_t peer_unmap_timer;
};

struct ol_rx_remote_data {
	qdf_nbuf_t msdu;
	uint8_t mac_id;
};

struct ol_fw_data {
	void *data;
	uint32_t len;
};

#define INVALID_REORDER_INDEX 0xFFFF

#define SPS_DESC_SIZE 8

#endif /* _OL_TXRX_TYPES__H_ */