/*
 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file ol_txrx_types.h
 * @brief Define the major data types used internally by the host datapath SW.
 */
#ifndef _OL_TXRX_TYPES__H_
#define _OL_TXRX_TYPES__H_

#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_mem.h>
#include <cds_queue.h>          /* TAILQ */
#include <a_types.h>            /* A_UINT8 */
#include <htt.h>                /* htt_sec_type, htt_pkt_type, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_t */
#include <wdi_event_api.h>      /* wdi_event_subscribe */
#include <qdf_timer.h>          /* qdf_timer_t */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <pktlog.h>             /* ol_pktlog_dev_handle */
#include <ol_txrx_stats.h>
#include <txrx.h>
#include "ol_txrx_htt_api.h"
#include "ol_htt_tx_api.h"
#include "ol_htt_rx_api.h"
#include "ol_txrx_ctrl_api.h"   /* WLAN_MAX_STA_COUNT */
#include "ol_txrx_osif_api.h"   /* ol_rx_callback */
#include "cdp_txrx_flow_ctrl_v2.h"
#include "cdp_txrx_peer_ops.h"

/*
 * The target may allocate multiple IDs for a peer.
 * In particular, the target may allocate one ID to represent the
 * multicast key the peer uses, and another ID to represent the
 * unicast key the peer uses.
 */
#define MAX_NUM_PEER_ID_PER_PEER 16

/* OL_TXRX_NUM_EXT_TIDS -
 * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
 */
#define OL_TXRX_NUM_EXT_TIDS 19

#define OL_TX_NUM_QOS_TIDS 16   /* 16 regular TIDs */
#define OL_TX_NON_QOS_TID 16
#define OL_TX_MGMT_TID 17
#define OL_TX_NUM_TIDS 18
#define OL_RX_MCAST_TID 18      /* Mcast TID only between f/w & host */

#define OL_TX_VDEV_MCAST_BCAST 0        /* HTT_TX_EXT_TID_MCAST_BCAST */
#define OL_TX_VDEV_DEFAULT_MGMT 1       /* HTT_TX_EXT_TID_DEFALT_MGMT */
#define OL_TX_VDEV_NUM_QUEUES 2

#define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
#define OL_TXRX_MGMT_NUM_TYPES 8

#define OL_TX_MUTEX_TYPE qdf_spinlock_t
#define OL_RX_MUTEX_TYPE qdf_spinlock_t

/* TXRX Histogram defines */
#define TXRX_DATA_HISTROGRAM_GRANULARITY 1000
#define TXRX_DATA_HISTROGRAM_NUM_INTERVALS 100

#define OL_TXRX_INVALID_VDEV_ID (-1)

struct ol_txrx_pdev_t;
struct ol_txrx_vdev_t;
struct ol_txrx_peer_t;

/* rx filter related */
#define MAX_PRIVACY_FILTERS 4   /* max privacy filters */

enum privacy_filter {
        PRIVACY_FILTER_ALWAYS,
        PRIVACY_FILTER_KEY_UNAVAILABLE,
};

enum privacy_filter_packet_type {
        PRIVACY_FILTER_PACKET_UNICAST,
        PRIVACY_FILTER_PACKET_MULTICAST,
        PRIVACY_FILTER_PACKET_BOTH
};

struct privacy_exemption {
        /* ethertype -
         * type of ethernet frames this filter applies to, in host byte order
         */
        uint16_t ether_type;
        enum privacy_filter filter_type;
        enum privacy_filter_packet_type packet_type;
};
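/*
 * Illustrative sketch (not part of the original header): a control-path
 * module could use struct privacy_exemption to let EAPOL handshake frames
 * (ethertype 0x888e, host byte order) bypass the privacy filter while keys
 * are not yet installed.  The exact field values are assumptions chosen
 * purely for illustration:
 *
 *      struct privacy_exemption eapol_exempt = {
 *              .ether_type = 0x888e,
 *              .filter_type = PRIVACY_FILTER_KEY_UNAVAILABLE,
 *              .packet_type = PRIVACY_FILTER_PACKET_BOTH,
 *      };
 */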

enum ol_tx_frm_type {
        OL_TX_FRM_STD = 0, /* regular frame - no added header fragments */
        OL_TX_FRM_TSO,     /* TSO segment, with a modified IP header added */
        OL_TX_FRM_AUDIO,   /* audio frames, with a custom LLC/SNAP hdr added */
        OL_TX_FRM_NO_FREE, /* frame requires special tx completion callback */
        ol_tx_frm_freed = 0xff, /* the tx desc is in free list */
};

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)

#define MAX_NO_PEERS_IN_LIMIT (2*10 + 2)

enum ol_tx_peer_bal_state {
        ol_tx_peer_bal_enable = 0,
        ol_tx_peer_bal_disable,
};

enum ol_tx_peer_bal_timer_state {
        ol_tx_peer_bal_timer_disable = 0,
        ol_tx_peer_bal_timer_active,
        ol_tx_peer_bal_timer_inactive,
};

struct ol_tx_limit_peer_t {
        u_int16_t limit_flag;
        u_int16_t peer_id;
        u_int16_t limit;
};

enum tx_peer_level {
        TXRX_IEEE11_B = 0,
        TXRX_IEEE11_A_G,
        TXRX_IEEE11_N,
        TXRX_IEEE11_AC,
        TXRX_IEEE11_AX,
        TXRX_IEEE11_MAX,
};

struct tx_peer_threshold {
        u_int32_t tput_thresh;
        u_int32_t tx_limit;
};
#endif


struct ol_tx_desc_t {
        qdf_nbuf_t netbuf;
        void *htt_tx_desc;
        uint16_t id;
        qdf_dma_addr_t htt_tx_desc_paddr;
        void *htt_frag_desc; /* struct msdu_ext_desc_t * */
        qdf_dma_addr_t htt_frag_desc_paddr;
        qdf_atomic_t ref_cnt;
        enum htt_tx_status status;

#ifdef QCA_COMPUTE_TX_DELAY
        uint32_t entry_timestamp_ticks;
#endif
        /*
         * Allow tx descriptors to be stored in (doubly-linked) lists.
         * This is mainly used for HL tx queuing and scheduling, but is
         * also used by LL+HL for batch processing of tx frames.
         */
        TAILQ_ENTRY(ol_tx_desc_t) tx_desc_list_elem;

        /*
         * Remember whether the tx frame is a regular packet, or whether
         * the driver added extra header fragments (e.g. a modified IP header
         * for TSO fragments, or an added LLC/SNAP header for audio interworking
         * data) that need to be handled in a special manner.
         * This field is filled in with the ol_tx_frm_type enum.
         */
        uint8_t pkt_type;

        u_int8_t vdev_id;

        struct ol_txrx_vdev_t *vdev;

        void *txq;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
        /*
         * used by tx encap, to restore the os buf start offset
         * after tx complete
         */
        uint8_t orig_l2_hdr_bytes;
#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
        struct ol_tx_flow_pool_t *pool;
#endif
        void *tso_desc;
        void *tso_num_desc;
};

typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;

union ol_tx_desc_list_elem_t {
        union ol_tx_desc_list_elem_t *next;
        struct ol_tx_desc_t tx_desc;
};

union ol_txrx_align_mac_addr_t {
        uint8_t raw[OL_TXRX_MAC_ADDR_LEN];
        struct {
                uint16_t bytes_ab;
                uint16_t bytes_cd;
                uint16_t bytes_ef;
        } align2;
        struct {
                uint32_t bytes_abcd;
                uint16_t bytes_ef;
        } align4;
};
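/*
 * Illustrative sketch (assumption, not code from this header): the align2
 * and align4 views above let a MAC address compare be done with a couple of
 * aligned loads instead of a six-byte memcmp, e.g.:
 *
 *      static inline int ol_txrx_mac_addr_eq_example(
 *              const union ol_txrx_align_mac_addr_t *a,
 *              const union ol_txrx_align_mac_addr_t *b)
 *      {
 *              return a->align4.bytes_abcd == b->align4.bytes_abcd &&
 *                      a->align4.bytes_ef == b->align4.bytes_ef;
 *      }
 */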

struct ol_rx_reorder_timeout_list_elem_t {
        TAILQ_ENTRY(ol_rx_reorder_timeout_list_elem_t)
                reorder_timeout_list_elem;
        uint32_t timestamp_ms;
        struct ol_txrx_peer_t *peer;
        uint8_t tid;
        uint8_t active;
};

#define TXRX_TID_TO_WMM_AC(_tid) ( \
        (((_tid) >> 1) == 3) ? TXRX_WMM_AC_VO : \
        (((_tid) >> 1) == 2) ? TXRX_WMM_AC_VI : \
        (((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
        TXRX_WMM_AC_BE)
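/*
 * Worked example (illustration only): for the eight 802.11 user priorities,
 * the macro above yields the standard UP-to-AC mapping:
 *
 *      TID 0 -> TXRX_WMM_AC_BE    TID 1 -> TXRX_WMM_AC_BK
 *      TID 2 -> TXRX_WMM_AC_BK    TID 3 -> TXRX_WMM_AC_BE
 *      TID 4 -> TXRX_WMM_AC_VI    TID 5 -> TXRX_WMM_AC_VI
 *      TID 6 -> TXRX_WMM_AC_VO    TID 7 -> TXRX_WMM_AC_VO
 */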

enum {
        OL_TX_SCHED_WRR_ADV_CAT_BE,
        OL_TX_SCHED_WRR_ADV_CAT_BK,
        OL_TX_SCHED_WRR_ADV_CAT_VI,
        OL_TX_SCHED_WRR_ADV_CAT_VO,
        OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA,
        OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT,
        OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA,
        OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT,

        OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES /* must be last */
};

A_COMPILE_TIME_ASSERT(ol_tx_sched_htt_ac_values,
        /* check that regular WMM AC enum values match */
        ((int)OL_TX_SCHED_WRR_ADV_CAT_VO == (int)HTT_AC_WMM_VO) &&
        ((int)OL_TX_SCHED_WRR_ADV_CAT_VI == (int)HTT_AC_WMM_VI) &&
        ((int)OL_TX_SCHED_WRR_ADV_CAT_BK == (int)HTT_AC_WMM_BK) &&
        ((int)OL_TX_SCHED_WRR_ADV_CAT_BE == (int)HTT_AC_WMM_BE) &&

        /* check that extension AC enum values match */
        ((int)OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA
                == (int)HTT_AC_EXT_NON_QOS) &&
        ((int)OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT
                == (int)HTT_AC_EXT_UCAST_MGMT) &&
        ((int)OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA
                == (int)HTT_AC_EXT_MCAST_DATA) &&
        ((int)OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT
                == (int)HTT_AC_EXT_MCAST_MGMT));

struct ol_tx_reorder_cat_timeout_t {
        TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
        qdf_timer_t timer;
        uint32_t duration_ms;
        struct ol_txrx_pdev_t *pdev;
};

enum ol_tx_scheduler_status {
        ol_tx_scheduler_idle = 0,
        ol_tx_scheduler_running,
};

enum ol_tx_queue_status {
        ol_tx_queue_empty = 0,
        ol_tx_queue_active,
        ol_tx_queue_paused,
};

struct ol_txrx_msdu_info_t {
        struct htt_msdu_info_t htt;
        struct ol_txrx_peer_t *peer;
        struct qdf_tso_info_t tso_info;
};

enum {
        ol_tx_aggr_untried = 0,
        ol_tx_aggr_enabled,
        ol_tx_aggr_disabled,
        ol_tx_aggr_retry,
        ol_tx_aggr_in_progress,
};

#define OL_TX_MAX_GROUPS_PER_QUEUE 1
#define OL_TX_MAX_VDEV_ID 16
#define OL_TXQ_GROUP_VDEV_ID_MASK_GET(_membership) \
        (((_membership) & 0xffff0000) >> 16)
#define OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(_mask, _vdev_id) \
        ((_mask >> _vdev_id) & 0x01)
#define OL_TXQ_GROUP_AC_MASK_GET(_membership) \
        ((_membership) & 0x0000ffff)
#define OL_TXQ_GROUP_AC_BIT_MASK_GET(_mask, _ac_mask) \
        ((_mask >> _ac_mask) & 0x01)
#define OL_TXQ_GROUP_MEMBERSHIP_GET(_vdev_mask, _ac_mask) \
        ((_vdev_mask << 16) | _ac_mask)
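/*
 * Worked example (illustration only): a group whose members are vdev 0 and
 * vdev 2 for two access categories could encode its membership as
 *
 *      membership = OL_TXQ_GROUP_MEMBERSHIP_GET(0x0005, 0x0005);
 *        -> 0x00050005
 *      OL_TXQ_GROUP_VDEV_ID_MASK_GET(0x00050005) -> 0x0005
 *      OL_TXQ_GROUP_AC_MASK_GET(0x00050005)      -> 0x0005
 *
 * The bit position assigned to each AC within the low 16 bits is an
 * assumption here; only the vdev-mask / AC-mask split is defined above.
 */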

struct ol_tx_frms_queue_t {
        /* list_elem -
         * Allow individual tx frame queues to be linked together into
         * scheduler queues of tx frame queues
         */
        TAILQ_ENTRY(ol_tx_frms_queue_t) list_elem;
        uint8_t aggr_state;
        struct {
                uint8_t total;
                /* pause requested by ctrl SW rather than txrx SW */
                uint8_t by_ctrl;
        } paused_count;
        uint8_t ext_tid;
        uint16_t frms;
        uint32_t bytes;
        ol_tx_desc_list head;
        enum ol_tx_queue_status flag;
        struct ol_tx_queue_group_t *group_ptrs[OL_TX_MAX_GROUPS_PER_QUEUE];
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
        struct ol_txrx_peer_t *peer;
#endif
};

enum {
        ol_tx_log_entry_type_invalid,
        ol_tx_log_entry_type_queue_state,
        ol_tx_log_entry_type_enqueue,
        ol_tx_log_entry_type_dequeue,
        ol_tx_log_entry_type_drop,
        ol_tx_log_entry_type_queue_free,

        ol_tx_log_entry_type_wrap,
};

struct ol_tx_log_queue_state_var_sz_t {
        uint32_t active_bitmap;
        uint16_t credit;
        uint8_t num_cats_active;
        uint8_t data[1];
};

struct ol_tx_log_queue_add_t {
        uint8_t num_frms;
        uint8_t tid;
        uint16_t peer_id;
        uint16_t num_bytes;
};

struct ol_mac_addr {
        uint8_t mac_addr[OL_TXRX_MAC_ADDR_LEN];
};

struct ol_tx_sched_t;

#ifndef ol_txrx_local_peer_id_t
#define ol_txrx_local_peer_id_t uint8_t /* default */
#endif

#ifdef QCA_COMPUTE_TX_DELAY
/*
 * Delay histogram bins: 16 bins of 10 ms each to count delays
 * from 0-160 ms, plus one overflow bin for delays > 160 ms.
 */
#define QCA_TX_DELAY_HIST_INTERNAL_BINS 17
#define QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS 10
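/*
 * Worked example (illustration only): with the values above, a queuing or
 * transmit delay of d_ms is counted in histogram bin
 *
 *      bin = min(d_ms / QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS,
 *                QCA_TX_DELAY_HIST_INTERNAL_BINS - 1);
 *
 * so a 37 ms delay lands in bin 3 and anything beyond 160 ms lands in the
 * final overflow bin (bin 16).  The exact rounding used by the
 * implementation may differ; this only shows the bin layout.
 */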

struct ol_tx_delay_data {
        struct {
                uint64_t transmit_sum_ticks;
                uint64_t queue_sum_ticks;
                uint32_t transmit_num;
                uint32_t queue_num;
        } avgs;
        uint16_t hist_bins_queue[QCA_TX_DELAY_HIST_INTERNAL_BINS];
};

#endif /* QCA_COMPUTE_TX_DELAY */

/* Thermal Mitigation */
enum throttle_phase {
        THROTTLE_PHASE_OFF,
        THROTTLE_PHASE_ON,
        /* Invalid */
        THROTTLE_PHASE_MAX,
};

#define THROTTLE_TX_THRESHOLD (100)

/*
 * Threshold to stop/start the priority queue, expressed as a percentage of
 * the actual flow start and stop thresholds.  When the number of available
 * descriptors falls below stop_priority_th, the priority queue is paused.
 * When the number of available descriptors rises above start_priority_th,
 * the priority queue is un-paused.
 */
#define TX_PRIORITY_TH (80)
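/*
 * Worked example (illustration only, values assumed): if a flow pool's
 * normal stop/start thresholds were 100 and 120 descriptors, then with
 * TX_PRIORITY_TH at 80 the derived priority-queue thresholds would be
 *
 *      stop_priority_th  = (stop_th  * TX_PRIORITY_TH) / 100 = 80
 *      start_priority_th = (start_th * TX_PRIORITY_TH) / 100 = 96
 *
 * i.e. the priority queue keeps flowing a little longer than the regular
 * queues and resumes a little earlier.
 */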

/*
 * Maximum number of descriptors used by a TSO jumbo packet with
 * 64K aggregation.
 */
#define MAX_TSO_SEGMENT_DESC (44)

typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *usr_ctxt);

struct ol_tx_queue_group_t {
        qdf_atomic_t credit;
        u_int32_t membership;
};
#define OL_TX_MAX_TXQ_GROUPS 2

#define OL_TX_GROUP_STATS_LOG_SIZE 128
struct ol_tx_group_credit_stats_t {
        struct {
                struct {
                        u_int16_t member_vdevs;
                        u_int16_t credit;
                } grp[OL_TX_MAX_TXQ_GROUPS];
        } stats[OL_TX_GROUP_STATS_LOG_SIZE];
        u_int16_t last_valid_index;
        u_int16_t wrap_around;
};

#ifdef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED: pool is active (can take/put descriptors)
 *                             and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *                           and network queues are paused
 * @FLOW_POOL_NON_PRIO_PAUSED: non-priority queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (descriptors can only be put back)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
        FLOW_POOL_ACTIVE_UNPAUSED = 0,
        FLOW_POOL_ACTIVE_PAUSED = 1,
        FLOW_POOL_NON_PRIO_PAUSED = 2,
        FLOW_POOL_INVALID = 3,
        FLOW_POOL_INACTIVE = 4
};

/**
 * struct ol_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 */
struct ol_txrx_pool_stats {
        uint16_t pool_map_count;
        uint16_t pool_unmap_count;
        uint16_t pkt_drop_no_pool;
};

/**
 * struct ol_tx_flow_pool_t - flow_pool info
 * @flow_pool_list_elem: flow_pool_list element
 * @flow_pool_lock: flow_pool lock
 * @flow_pool_id: flow_pool id
 * @flow_pool_size: flow_pool size
 * @avail_desc: available descriptors
 * @deficient_desc: deficient descriptors
 * @status: flow pool status
 * @flow_type: flow pool type
 * @member_flow_id: member flow id
 * @stop_th: stop threshold
 * @start_th: start threshold
 * @freelist: tx descriptor freelist
 * @pkt_drop_no_desc: drop due to no descriptors
 * @ref_cnt: pool's ref count
 * @stop_priority_th: Threshold to stop priority queue
 * @start_priority_th: Threshold to start priority queue
 */
struct ol_tx_flow_pool_t {
        TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
        qdf_spinlock_t flow_pool_lock;
        uint8_t flow_pool_id;
        uint16_t flow_pool_size;
        uint16_t avail_desc;
        uint16_t deficient_desc;
        enum flow_pool_status status;
        enum htt_flow_type flow_type;
        uint8_t member_flow_id;
        uint16_t stop_th;
        uint16_t start_th;
        union ol_tx_desc_list_elem_t *freelist;
        uint16_t pkt_drop_no_desc;
        qdf_atomic_t ref_cnt;
        uint16_t stop_priority_th;
        uint16_t start_priority_th;
};

#endif

/*
 * struct ol_txrx_peer_id_map - Map of firmware peer_ids to peers on host
 * @peer: Pointer to peer object
 * @peer_id_ref_cnt: No. of firmware references to the peer_id
 * @del_peer_id_ref_cnt: No. of outstanding unmap events for peer_id
 *                       after the peer object is deleted on the host.
 *
 * peer_id is used as an index into the array of ol_txrx_peer_id_map.
 */
struct ol_txrx_peer_id_map {
        struct ol_txrx_peer_t *peer;
        qdf_atomic_t peer_id_ref_cnt;
        qdf_atomic_t del_peer_id_ref_cnt;
};
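/*
 * Illustrative sketch (assumption, not code from this header): with the map
 * above, resolving a firmware peer_id to a host peer object is a direct
 * array lookup on the pdev, roughly
 *
 *      struct ol_txrx_peer_t *peer =
 *              (peer_id < max_peers) ?
 *              pdev->peer_id_to_obj_map[peer_id].peer : NULL;
 *
 * with pdev->peer_map_unmap_lock (declared further below) held while the
 * entry and its reference counts are examined.  "max_peers" is a
 * hypothetical bound used only for this sketch.
 */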

/**
 * struct ol_txrx_stats_req_internal - internal tracking state for a
 * requested set of statistics
 */
struct ol_txrx_stats_req_internal {
        struct ol_txrx_stats_req base;
        TAILQ_ENTRY(ol_txrx_stats_req_internal) req_list_elem;
        int serviced; /* state of this request */
        int offset;
};

/*
 * As depicted in the diagram below, the pdev contains an array of
 * NUM_EXT_TIDS ol_tx_active_queues_in_tid_t elements.
 * Each element identifies all the tx queues that are active for
 * the TID, from the different peers.
 *
 * Each peer contains an array of NUM_EXT_TIDS ol_tx_frms_queue_t elements.
 * Each element identifies the tx frames for the TID that need to be sent
 * to the peer.
 *
 *
 * pdev: ol_tx_active_queues_in_tid_t active_in_tids[NUM_EXT_TIDS]
 *                                TID
 *       0            1            2                     17
 *  +============+============+============+==    ==+============+
 *  | active (y) | active (n) | active (n) |        | active (y) |
 *  |------------+------------+------------+--    --+------------|
 *  | queues     | queues     | queues     |        | queues     |
 *  +============+============+============+==    ==+============+
 *       |                                               |
 *    .--+-----------------------------------------------'
 *    |  |
 *    |  |     peer X:                            peer Y:
 *    |  |     ol_tx_frms_queue_t                 ol_tx_frms_queue_t
 *    |  |     tx_queues[NUM_EXT_TIDS]            tx_queues[NUM_EXT_TIDS]
 *    |  | TID +======+                       TID +======+
 *    |  `---->| next |-------------------------->| next |--X
 *    |     0  | prev |   .------.   .------.  0  | prev |   .------.
 *    |        | txq  |-->|txdesc|-->|txdesc|     | txq  |-->|txdesc|
 *    |        +======+   `------'   `------'     +======+   `------'
 *    |        | next |      |          |      1  | next |      |
 *    |     1  | prev |      v          v         | prev |      v
 *    |        | txq  |   .------.   .------.     | txq  |   .------.
 *    |        +======+   |netbuf|   |netbuf|     +======+   |netbuf|
 *    |        | next |   `------'   `------'     | next |   `------'
 *    |     2  | prev |                        2  | prev |
 *    |        | txq  |                           | txq  |
 *    |        +======+                           +======+
 *    |        |      |                           |      |
 *    |
 *    |
 *    |        |      |                           |      |
 *    |        +======+                           +======+
 *    `------->| next |--X                        | next |
 *          17 | prev |   .------.             17 | prev |
 *             | txq  |-->|txdesc|                | txq  |
 *             +======+   `------'                +======+
 *                           |
 *                           v
 *                        .------.
 *                        |netbuf|
 *                        `------'
 */
struct ol_txrx_pdev_t {
        /* ctrl_pdev - handle for querying config info */
        struct cdp_cfg *ctrl_pdev;

        /* osdev - handle for mem alloc / free, map / unmap */
        qdf_device_t osdev;

        htt_pdev_handle htt_pdev;

#ifdef WLAN_FEATURE_FASTPATH
        struct CE_handle *ce_tx_hdl; /* Handle to Tx packet posting CE */
        struct CE_handle *ce_htt_msg_hdl; /* Handle to TxRx completion CE */
#endif /* WLAN_FEATURE_FASTPATH */

        struct {
                int is_high_latency;
                int host_addba;
                int ll_pause_txq_limit;
                int default_tx_comp_req;
        } cfg;

        /* WDI subscriber's event list */
        wdi_event_subscribe **wdi_event_list;

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
        bool pkt_log_init;
        /* Pktlog pdev */
        struct ol_pktlog_dev_t *pl_dev;
#endif /* #ifndef REMOVE_PKT_LOG */

        enum ol_sec_type sec_types[htt_num_sec_types];
        /* standard frame type */
        enum wlan_frm_fmt frame_format;
        enum htt_pkt_type htt_pkt_type;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
        /* txrx encap/decap */
        uint8_t sw_tx_encap;
        uint8_t sw_rx_decap;
        uint8_t target_tx_tran_caps;
        uint8_t target_rx_tran_caps;
        /* llc process */
        uint8_t sw_tx_llc_proc_enable;
        uint8_t sw_rx_llc_proc_enable;
        /* A-MSDU */
        uint8_t sw_subfrm_hdr_recovery_enable;
        /* Protected Frame bit handling */
        uint8_t sw_pf_proc_enable;
#endif
        /*
         * target tx credit -
         * not needed for LL, but used for HL download scheduler to keep
         * track of roughly how much space is available in the target for
         * tx frames
         */
        qdf_atomic_t target_tx_credit;
        qdf_atomic_t orig_target_tx_credit;

        /* Peer mac address to staid mapping */
        struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];

        /* ol_txrx_vdev list */
        TAILQ_HEAD(, ol_txrx_vdev_t) vdev_list;

        TAILQ_HEAD(, ol_txrx_stats_req_internal) req_list;
        int req_list_depth;
        qdf_spinlock_t req_list_spinlock;

        /* peer ID to peer object map (array of pointers to peer objects) */
        struct ol_txrx_peer_id_map *peer_id_to_obj_map;

        struct {
                unsigned int mask;
                unsigned int idx_bits;

                TAILQ_HEAD(, ol_txrx_peer_t) * bins;
        } peer_hash;

        /* rx specific processing */
        struct {
                struct {
                        TAILQ_HEAD(, ol_rx_reorder_t) waitlist;
                        uint32_t timeout_ms;
                } defrag;
                struct {
                        int defrag_timeout_check;
                        int dup_check;
                } flags;

                struct {
                        struct ol_tx_reorder_cat_timeout_t
                                access_cats[TXRX_NUM_WMM_AC];
                } reorder_timeout;
                qdf_spinlock_t mutex;
        } rx;

        /* rx proc function */
        void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
                            struct ol_txrx_peer_t *peer,
                            unsigned int tid, qdf_nbuf_t msdu_list);

        /* tx data delivery notification callback function */
        struct {
                ol_txrx_data_tx_cb func;
                void *ctxt;
        } tx_data_callback;

        /* tx management delivery notification callback functions */
        struct {
                struct {
                        ol_txrx_mgmt_tx_cb download_cb;
                        ol_txrx_mgmt_tx_cb ota_ack_cb;
                        void *ctxt;
                } callbacks[OL_TXRX_MGMT_NUM_TYPES];
        } tx_mgmt;

        data_stall_detect_cb data_stall_detect_callback;
        /* packetdump callback functions */
        tp_ol_packetdump_cb ol_tx_packetdump_cb;
        tp_ol_packetdump_cb ol_rx_packetdump_cb;

#ifdef WLAN_FEATURE_TSF_PLUS
        tp_ol_timestamp_cb ol_tx_timestamp_cb;
#endif

        struct {
                uint16_t pool_size;
                uint16_t num_free;
                union ol_tx_desc_list_elem_t *array;
                union ol_tx_desc_list_elem_t *freelist;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
                uint8_t num_invalid_bin;
                qdf_spinlock_t flow_pool_list_lock;
                TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
#endif
                uint32_t page_size;
                uint16_t desc_reserved_size;
                uint8_t page_divider;
                uint32_t offset_filter;
                struct qdf_mem_multi_page_t desc_pages;
#ifdef DESC_DUP_DETECT_DEBUG
                unsigned long *free_list_bitmap;
#endif
        } tx_desc;

        uint8_t is_mgmt_over_wmi_enabled;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2)
        struct ol_txrx_pool_stats pool_stats;
        uint32_t num_msdu_desc;
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
        struct ol_tx_flow_pool_t *mgmt_pool;
#endif
#endif

        struct {
                int (*cmp)(union htt_rx_pn_t *new,
                           union htt_rx_pn_t *old,
                           int is_unicast, int opmode);
                int len;
        } rx_pn[htt_num_sec_types];

        /* tx mutex */
        OL_TX_MUTEX_TYPE tx_mutex;

        /*
         * peer ref mutex:
         * 1. Protect peer object lookups until the returned peer object's
         *    reference count is incremented.
         * 2. Provide mutex when accessing peer object lookup structures.
         */
        OL_RX_MUTEX_TYPE peer_ref_mutex;

        /*
         * last_real_peer_mutex:
         * Protect lookups of any vdev's last_real_peer pointer until the
         * reference count for the pointed-to peer object is incremented.
         * This mutex could be in the vdev struct, but it's slightly simpler
         * to have a single lock in the pdev struct.  Since the lock is only
         * held for an extremely short time, and since it's very unlikely for
         * two vdev's to concurrently access the lock, there's no real
         * benefit to having a per-vdev lock.
         */
        OL_RX_MUTEX_TYPE last_real_peer_mutex;

        qdf_spinlock_t peer_map_unmap_lock;

        struct {
                struct {
                        struct {
                                struct {
                                        uint64_t ppdus;
                                        uint64_t mpdus;
                                } normal;
                                struct {
                                        /*
                                         * mpdu_bad is general -
                                         * replace it with the specific
                                         * counters below
                                         */
                                        uint64_t mpdu_bad;
                                        /* uint64_t mpdu_fcs; */
                                        /* uint64_t mpdu_duplicate; */
                                        /* uint64_t mpdu_pn_replay; */
                                        /* uint64_t mpdu_bad_sender; */
                                        /* ^ comment: peer not found */
                                        /* uint64_t mpdu_flushed; */
                                        /* uint64_t msdu_defrag_mic_err; */
                                        uint64_t msdu_mc_dup_drop;
                                } err;
                        } rx;
                } priv;
                struct ol_txrx_stats pub;
        } stats;

#if defined(ENABLE_RX_REORDER_TRACE)
        struct {
                uint32_t mask;
                uint32_t idx;
                uint64_t cnt;
#define TXRX_RX_REORDER_TRACE_SIZE_LOG2 8       /* 256 entries */
                struct {
                        uint16_t reorder_idx;
                        uint16_t seq_num;
                        uint8_t num_mpdus;
                        uint8_t tid;
                } *data;
        } rx_reorder_trace;
#endif /* ENABLE_RX_REORDER_TRACE */

#if defined(ENABLE_RX_PN_TRACE)
        struct {
                uint32_t mask;
                uint32_t idx;
                uint64_t cnt;
#define TXRX_RX_PN_TRACE_SIZE_LOG2 5    /* 32 entries */
                struct {
                        struct ol_txrx_peer_t *peer;
                        uint32_t pn32;
                        uint16_t seq_num;
                        uint8_t unicast;
                        uint8_t tid;
                } *data;
        } rx_pn_trace;
#endif /* ENABLE_RX_PN_TRACE */

#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
        bool host_80211_enable;
#endif

        /*
         * tx_sched only applies for HL, but is defined unconditionally
         * rather than only if defined(CONFIG_HL_SUPPORT), because the
         * struct only occupies a few bytes, and wrapping references to its
         * members in "defined(CONFIG_HL_SUPPORT)" conditional compilation
         * would add complexity.
         * If this struct gets expanded to a non-trivial size, it should be
         * conditionally compiled to only apply if
         * defined(CONFIG_HL_SUPPORT).
         */
        qdf_spinlock_t tx_queue_spinlock;
        struct {
                enum ol_tx_scheduler_status tx_sched_status;
                struct ol_tx_sched_t *scheduler;
                struct ol_tx_frms_queue_t *last_used_txq;
        } tx_sched;
        /*
         * tx_queue only applies for HL, but is defined unconditionally to
         * avoid wrapping references to tx_queue in
         * "defined(CONFIG_HL_SUPPORT)" conditional compilation.
         */
        struct {
                qdf_atomic_t rsrc_cnt;
                /* threshold_lo - when to start tx desc margin replenishment */
                uint16_t rsrc_threshold_lo;
                /*
                 * threshold_hi - where to stop during tx desc margin
                 * replenishment
                 */
                uint16_t rsrc_threshold_hi;
        } tx_queue;

#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
#define OL_TXQ_LOG_SIZE 512
        qdf_spinlock_t txq_log_spinlock;
        struct {
                int size;
                int oldest_record_offset;
                int offset;
                int allow_wrap;
                u_int32_t wrapped;
                /* aligned to u_int32_t boundary */
                u_int8_t data[OL_TXQ_LOG_SIZE];
        } txq_log;
#endif

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
        qdf_spinlock_t peer_stat_mutex;
#endif

        int rssi_update_shift;
        int rssi_new_weight;
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
        struct {
                ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
                ol_txrx_local_peer_id_t freelist;
                qdf_spinlock_t lock;
                ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
        } local_peer_ids;
#endif

#ifdef QCA_COMPUTE_TX_DELAY
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
#define QCA_TX_DELAY_NUM_CATEGORIES \
        (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
#else
#define QCA_TX_DELAY_NUM_CATEGORIES 1
#endif
        struct {
                qdf_spinlock_t mutex;
                struct {
                        struct ol_tx_delay_data copies[2]; /* ping-pong */
                        int in_progress_idx;
                        uint32_t avg_start_time_ticks;
                } cats[QCA_TX_DELAY_NUM_CATEGORIES];
                uint32_t tx_compl_timestamp_ticks;
                uint32_t avg_period_ticks;
                uint32_t hist_internal_bin_width_mult;
                uint32_t hist_internal_bin_width_shift;
        } tx_delay;

        uint16_t packet_count[QCA_TX_DELAY_NUM_CATEGORIES];
        uint16_t packet_loss_count[QCA_TX_DELAY_NUM_CATEGORIES];

#endif /* QCA_COMPUTE_TX_DELAY */

        struct {
                qdf_spinlock_t mutex;
                /* timer used to monitor the throttle "on" phase and
                 * "off" phase
                 */
                qdf_timer_t phase_timer;
                /* timer used to send tx frames */
                qdf_timer_t tx_timer;
                /* This is the time in ms of the throttling window, it will
                 * include an "on" phase and an "off" phase
                 */
                uint32_t throttle_period_ms;
                /* Current throttle level set by the client ex. level 0,
                 * level 1, etc
                 */
                enum throttle_level current_throttle_level;
                /* Index that points to the phase within the throttle period */
                enum throttle_phase current_throttle_phase;
                /* Maximum number of frames to send to the target at one time */
                uint32_t tx_threshold;
                /* stores time in ms of on/off phase for each throttle level */
                int throttle_time_ms[THROTTLE_LEVEL_MAX][THROTTLE_PHASE_MAX];
                /* mark true if traffic is paused due to thermal throttling */
                bool is_paused;
        } tx_throttle;

#if defined(FEATURE_TSO)
        struct {
                uint16_t pool_size;
                uint16_t num_free;
                struct qdf_tso_seg_elem_t *freelist;
                /* tso mutex */
                OL_TX_MUTEX_TYPE tso_mutex;
        } tso_seg_pool;
        struct {
                uint16_t num_seg_pool_size;
                uint16_t num_free;
                struct qdf_tso_num_seg_elem_t *freelist;
                /* tso mutex */
                OL_TX_MUTEX_TYPE tso_num_seg_mutex;
        } tso_num_seg_pool;
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
        struct {
                enum ol_tx_peer_bal_state enabled;
                qdf_spinlock_t mutex;
                /* timer used to trigger more frames for bad peers */
                qdf_timer_t peer_bal_timer;
                /* This is the time in ms of the peer balance timer period */
                u_int32_t peer_bal_period_ms;
                /* This is the txq limit */
                u_int32_t peer_bal_txq_limit;
                /* This is the state of the peer balance timer */
                enum ol_tx_peer_bal_timer_state peer_bal_timer_state;
                /* This is the count of active peers which are under
                 * tx flow control
                 */
                u_int32_t peer_num;
                /* This is the list of peers which are under tx flow control */
                struct ol_tx_limit_peer_t limit_list[MAX_NO_PEERS_IN_LIMIT];
                /* This is the threshold configuration */
                struct tx_peer_threshold ctl_thresh[TXRX_IEEE11_MAX];
        } tx_peer_bal;
#endif /* CONFIG_HL_SUPPORT && QCA_BAD_PEER_TX_FLOW_CL */

        struct ol_tx_queue_group_t txq_grps[OL_TX_MAX_TXQ_GROUPS];
#ifdef DEBUG_HL_LOGGING
        qdf_spinlock_t grp_stat_spinlock;
        struct ol_tx_group_credit_stats_t grp_stats;
#endif
        int tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES];
        uint8_t ocb_peer_valid;
        struct ol_txrx_peer_t *ocb_peer;
        tx_pause_callback pause_cb;

        struct {
                void (*lro_flush_cb)(void *);
                qdf_atomic_t lro_dev_cnt;
        } lro_info;
        struct ol_txrx_peer_t *self_peer;
        qdf_work_t peer_unmap_timer_work;

#ifdef IPA_OFFLOAD
        ipa_uc_op_cb_type ipa_uc_op_cb;
        void *usr_ctxt;
        struct ol_txrx_ipa_resources ipa_resource;
#endif /* IPA_OFFLOAD */
};

struct ol_txrx_vdev_t {
        struct ol_txrx_pdev_t *pdev;    /* pdev - the physical device that is
                                         * the parent of this virtual device
                                         */
        uint8_t vdev_id;                /* ID used to specify a particular vdev
                                         * to the target
                                         */
        void *osif_dev;
        union ol_txrx_align_mac_addr_t mac_addr; /* MAC address */
        /* tx paused - NO LONGER NEEDED? */
        TAILQ_ENTRY(ol_txrx_vdev_t) vdev_list_elem; /* node in the pdev's list
                                                     * of vdevs
                                                     */
        TAILQ_HEAD(peer_list_t, ol_txrx_peer_t) peer_list;
        struct ol_txrx_peer_t *last_real_peer; /* last real peer created for
                                                * this vdev (not "self"
                                                * pseudo-peer)
                                                */
        ol_txrx_rx_fp rx; /* receive function used by this vdev */

        struct {
                /*
                 * If the vdev object couldn't be deleted immediately because
                 * it still had some peer objects left, remember that a delete
                 * was requested, so it can be deleted once all its peers have
                 * been deleted.
                 */
                int pending;
                /*
                 * Store a function pointer and a context argument to provide a
                 * notification for when the vdev is deleted.
                 */
                ol_txrx_vdev_delete_cb callback;
                void *context;
                atomic_t detaching;
        } delete;

        /* safe mode control to bypass the encrypt and decipher process */
        uint32_t safemode;

        /* rx filter related */
        uint32_t drop_unenc;
        struct privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
        uint32_t num_filters;

        enum wlan_op_mode opmode;

#ifdef QCA_IBSS_SUPPORT
        /* ibss mode related */
        int16_t ibss_peer_num;  /* the number of active peers */
        int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
#endif

#if defined(CONFIG_HL_SUPPORT)
        struct ol_tx_frms_queue_t txqs[OL_TX_VDEV_NUM_QUEUES];
#endif

        struct {
                struct {
                        qdf_nbuf_t head;
                        qdf_nbuf_t tail;
                        int depth;
                } txq;
                uint32_t paused_reason;
                qdf_spinlock_t mutex;
                qdf_timer_t timer;
                int max_q_depth;
                bool is_q_paused;
                bool is_q_timer_on;
                uint32_t q_pause_cnt;
                uint32_t q_unpause_cnt;
                uint32_t q_overflow_cnt;
        } ll_pause;
        bool disable_intrabss_fwd;
        qdf_atomic_t os_q_paused;
        uint16_t tx_fl_lwm;
        uint16_t tx_fl_hwm;
        qdf_spinlock_t flow_control_lock;
        ol_txrx_tx_flow_control_fp osif_flow_control_cb;
        ol_txrx_tx_flow_control_is_pause_fp osif_flow_control_is_pause;
        void *osif_fc_ctx;

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
        union ol_txrx_align_mac_addr_t hl_tdls_ap_mac_addr;
        bool hlTdlsFlag;
#endif

#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
        qdf_atomic_t tx_desc_count;
#endif

        uint16_t wait_on_peer_id;
        union ol_txrx_align_mac_addr_t last_peer_mac_addr;
        qdf_event_t wait_delete_comp;
#if defined(FEATURE_TSO)
        struct {
                int pool_elems; /* total number of elements in the pool */
                int alloc_cnt; /* number of allocated elements */
                uint32_t *freelist; /* free list of qdf_tso_seg_elem_t */
        } tso_pool_t;
#endif

        /* last channel change event received */
        struct {
                bool is_valid;  /* whether the rest of the members are valid */
                uint16_t mhz;
                uint16_t band_center_freq1;
                uint16_t band_center_freq2;
                WLAN_PHY_MODE phy_mode;
        } ocb_channel_event;

        /* Information about the channels in the OCB schedule */
        struct ol_txrx_ocb_chan_info *ocb_channel_info;
        uint32_t ocb_channel_count;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
        struct ol_tx_flow_pool_t *pool;
#endif
        /* intra bss forwarded tx and rx packets count */
        uint64_t fwd_tx_packets;
        uint64_t fwd_rx_packets;
        bool is_wisa_mode_enable;
        uint8_t mac_id;
};

struct ol_rx_reorder_array_elem_t {
        qdf_nbuf_t head;
        qdf_nbuf_t tail;
};

struct ol_rx_reorder_t {
        uint8_t win_sz;
        uint8_t win_sz_mask;
        uint8_t num_mpdus;
        struct ol_rx_reorder_array_elem_t *array;
        /* base - single rx reorder element used for non-aggr cases */
        struct ol_rx_reorder_array_elem_t base;
#if defined(QCA_SUPPORT_OL_RX_REORDER_TIMEOUT)
        struct ol_rx_reorder_timeout_list_elem_t timeout;
#endif
        /* only used for defrag right now */
        TAILQ_ENTRY(ol_rx_reorder_t) defrag_waitlist_elem;
        uint32_t defrag_timeout_ms;
        /* get back to parent ol_txrx_peer_t when ol_rx_reorder_t is in a
         * waitlist
         */
        uint16_t tid;
};

enum {
        txrx_sec_mcast = 0,
        txrx_sec_ucast
};

typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
                                      tx_msdu_info);

#define OL_TXRX_PEER_SECURITY_MULTICAST 0
#define OL_TXRX_PEER_SECURITY_UNICAST 1
#define OL_TXRX_PEER_SECURITY_MAX 2


/* Allow 6000 ms to receive peer unmap events after peer is deleted */
#define OL_TXRX_PEER_UNMAP_TIMEOUT (6000)

struct ol_txrx_cached_bufq_t {
        /* cached_bufq is used to enqueue the pending RX frames from a peer
         * before the peer is registered for data service. The list will be
         * flushed to HDD once that station is registered.
         */
        struct list_head cached_bufq;
        /* mutual exclusion lock to access the cached_bufq queue */
        qdf_spinlock_t bufq_lock;
        /* # entries in queue after which subsequent adds will be dropped */
        uint32_t thresh;
        /* # entries present in cached_bufq */
        uint32_t curr;
        /* # max num of entries in the queue if bufq thresh was not in place */
        uint32_t high_water_mark;
        /* # max num of entries in the queue if we did not drop packets */
        uint32_t qdepth_no_thresh;
        /* # of packets (beyond threshold) dropped from cached_bufq */
        uint32_t dropped;
};
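/*
 * Illustrative sketch (assumption, not code from this header): the intended
 * enqueue policy for the cached bufq above is roughly
 *
 *      qdf_spin_lock_bh(&bufq_info->bufq_lock);
 *      if (bufq_info->curr >= bufq_info->thresh) {
 *              bufq_info->dropped++;          // drop the rx frame
 *      } else {
 *              // add the frame to bufq_info->cached_bufq
 *              bufq_info->curr++;
 *              if (bufq_info->curr > bufq_info->high_water_mark)
 *                      bufq_info->high_water_mark = bufq_info->curr;
 *      }
 *      qdf_spin_unlock_bh(&bufq_info->bufq_lock);
 */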

struct ol_txrx_peer_t {
        struct ol_txrx_vdev_t *vdev;

        qdf_atomic_t ref_cnt;
        qdf_atomic_t delete_in_progress;
        qdf_atomic_t flush_in_progress;

        /* The peer state tracking is used for HL systems
         * that don't support tx and rx filtering within the target.
         * In such systems, the peer's state determines what kind of
         * tx and rx filtering, if any, is done.
         * This variable doesn't apply to LL systems, or to HL systems for
         * which the target handles tx and rx filtering. However, it is
         * simplest to declare and update this variable unconditionally,
         * for all systems.
         */
        enum ol_txrx_peer_state state;
        qdf_spinlock_t peer_info_lock;

        /* Wrapper around the cached_bufq list */
        struct ol_txrx_cached_bufq_t bufq_info;

        ol_tx_filter_func tx_filter;

        /* peer ID(s) for this peer */
        uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
        uint16_t local_id;
#endif

        union ol_txrx_align_mac_addr_t mac_addr;

        /* node in the vdev's list of peers */
        TAILQ_ENTRY(ol_txrx_peer_t) peer_list_elem;
        /* node in the hash table bin's list of peers */
        TAILQ_ENTRY(ol_txrx_peer_t) hash_list_elem;

        /*
         * per TID info -
         * stored in separate arrays to avoid alignment padding mem overhead
         */
        struct ol_rx_reorder_t tids_rx_reorder[OL_TXRX_NUM_EXT_TIDS];
        union htt_rx_pn_t tids_last_pn[OL_TXRX_NUM_EXT_TIDS];
        uint8_t tids_last_pn_valid[OL_TXRX_NUM_EXT_TIDS];
        uint8_t tids_rekey_flag[OL_TXRX_NUM_EXT_TIDS];
        uint16_t tids_next_rel_idx[OL_TXRX_NUM_EXT_TIDS];
        uint16_t tids_last_seq[OL_TXRX_NUM_EXT_TIDS];
        uint16_t tids_mcast_last_seq[OL_TXRX_NUM_EXT_TIDS];

        struct {
                enum htt_sec_type sec_type;
                uint32_t michael_key[2];        /* relevant for TKIP */
        } security[2];  /* 0 -> multicast, 1 -> unicast */

        /*
         * rx proc function: this either is a copy of pdev's rx_opt_proc for
         * regular rx processing, or has been redirected to a /dev/null discard
         * function when peer deletion is in progress.
         */
        void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
                            struct ol_txrx_peer_t *peer,
                            unsigned int tid, qdf_nbuf_t msdu_list);

#if defined(CONFIG_HL_SUPPORT)
        struct ol_tx_frms_queue_t txqs[OL_TX_NUM_TIDS];
#endif

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
        ol_txrx_peer_stats_t stats;
#endif
        int16_t rssi_dbm;

        /* NAWDS Flag and Bss Peer bit */
        uint16_t nawds_enabled:1, bss_peer:1, valid:1;

        /* QoS info */
        uint8_t qos_capable;
        /* U-APSD tid mask */
        uint8_t uapsd_mask;
        /* flag indicating key installed */
        uint8_t keyinstalled;

        /* Bit to indicate if PN check is done in fw */
        qdf_atomic_t fw_pn_check;

#ifdef WLAN_FEATURE_11W
        /* PN counter for Robust Management Frames */
        uint64_t last_rmf_pn;
        uint32_t rmf_pn_replays;
        uint8_t last_rmf_pn_valid;
#endif

        /* Properties of the last received PPDU */
        int16_t last_pkt_rssi_cmb;
        int16_t last_pkt_rssi[4];
        uint8_t last_pkt_legacy_rate;
        uint8_t last_pkt_legacy_rate_sel;
        uint32_t last_pkt_timestamp_microsec;
        uint8_t last_pkt_timestamp_submicrosec;
        uint32_t last_pkt_tsf;
        uint8_t last_pkt_tid;
        uint16_t last_pkt_center_freq;
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
        u_int16_t tx_limit;
        u_int16_t tx_limit_flag;
        u_int16_t tx_pause_flag;
#endif
        qdf_time_t last_assoc_rcvd;
        qdf_time_t last_disassoc_rcvd;
        qdf_time_t last_deauth_rcvd;
        qdf_atomic_t fw_create_pending;
        qdf_timer_t peer_unmap_timer;
};

struct ol_rx_remote_data {
        qdf_nbuf_t msdu;
        uint8_t mac_id;
};

#endif /* _OL_TXRX_TYPES__H_ */