/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file ol_txrx_types.h
 * @brief Define the major data types used internally by the host datapath SW.
 */
#ifndef _OL_TXRX_TYPES__H_
#define _OL_TXRX_TYPES__H_

#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_mem.h>
#include <cds_queue.h>          /* TAILQ */
#include <a_types.h>            /* A_UINT8 */
#include <htt.h>                /* htt_sec_type, htt_pkt_type, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_t */
#include <wdi_event_api.h>      /* wdi_event_subscribe */
#include <qdf_timer.h>          /* qdf_timer_t */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <pktlog.h>             /* ol_pktlog_dev_handle */
#include <ol_txrx_stats.h>
#include <txrx.h>
#include "ol_txrx_htt_api.h"
#include "ol_htt_tx_api.h"
#include "ol_htt_rx_api.h"
#include "ol_txrx_ctrl_api.h"   /* WLAN_MAX_STA_COUNT */
#include "ol_txrx_osif_api.h"   /* ol_rx_callback_fp */
#include "cdp_txrx_flow_ctrl_v2.h"
#include "cdp_txrx_peer_ops.h"

/*
 * The target may allocate multiple IDs for a peer.
 * In particular, the target may allocate one ID to represent the
 * multicast key the peer uses, and another ID to represent the
 * unicast key the peer uses.
 */
#define MAX_NUM_PEER_ID_PER_PEER 16

/* OL_TXRX_NUM_EXT_TIDS -
 * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
 */
#define OL_TXRX_NUM_EXT_TIDS 19

#define OL_TX_NUM_QOS_TIDS 16 /* 16 regular TIDs */
#define OL_TX_NON_QOS_TID 16
#define OL_TX_MGMT_TID 17
#define OL_TX_NUM_TIDS 18
#define OL_RX_MCAST_TID 18 /* Mcast TID only between f/w & host */

#define OL_TX_VDEV_MCAST_BCAST 0 /* HTT_TX_EXT_TID_MCAST_BCAST */
#define OL_TX_VDEV_DEFAULT_MGMT 1 /* HTT_TX_EXT_TID_DEFALT_MGMT */
#define OL_TX_VDEV_NUM_QUEUES 2

#define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
#define OL_TXRX_MGMT_NUM_TYPES 8

#define OL_TX_MUTEX_TYPE qdf_spinlock_t
#define OL_RX_MUTEX_TYPE qdf_spinlock_t

/* TXRX Histogram defines */
#define TXRX_DATA_HISTROGRAM_GRANULARITY 1000
#define TXRX_DATA_HISTROGRAM_NUM_INTERVALS 100

struct ol_txrx_pdev_t;
struct ol_txrx_vdev_t;
struct ol_txrx_peer_t;

/* rx filter related */
#define MAX_PRIVACY_FILTERS 4 /* max privacy filters */

enum privacy_filter {
        PRIVACY_FILTER_ALWAYS,
        PRIVACY_FILTER_KEY_UNAVAILABLE,
};

enum privacy_filter_packet_type {
        PRIVACY_FILTER_PACKET_UNICAST,
        PRIVACY_FILTER_PACKET_MULTICAST,
        PRIVACY_FILTER_PACKET_BOTH
};

struct privacy_exemption {
        /* ethertype -
         * type of ethernet frames this filter applies to, in host byte order
         */
        uint16_t ether_type;
        enum privacy_filter filter_type;
        enum privacy_filter_packet_type packet_type;
};

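/*
 * Illustrative sketch (an assumption, not part of the original header):
 * one plausible way a frame could be checked against a privacy_exemption
 * entry. The helper name is hypothetical.
 */
static inline int
ol_rx_privacy_exemption_match(const struct privacy_exemption *filter,
                              uint16_t ether_type, int is_multicast)
{
        if (filter->ether_type != ether_type)
                return 0; /* filter applies to a different ethertype */
        switch (filter->packet_type) {
        case PRIVACY_FILTER_PACKET_UNICAST:
                return !is_multicast;
        case PRIVACY_FILTER_PACKET_MULTICAST:
                return is_multicast;
        default: /* PRIVACY_FILTER_PACKET_BOTH */
                return 1;
        }
}
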
enum ol_tx_frm_type {
        OL_TX_FRM_STD = 0, /* regular frame - no added header fragments */
        OL_TX_FRM_TSO,     /* TSO segment, with a modified IP header added */
        OL_TX_FRM_AUDIO,   /* audio frames, with a custom LLC/SNAP hdr added */
        OL_TX_FRM_NO_FREE, /* frame requires special tx completion callback */
        ol_tx_frm_freed = 0xff, /* the tx desc is in free list */
};

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)

#define MAX_NO_PEERS_IN_LIMIT (2*10 + 2)

enum ol_tx_peer_bal_state {
        ol_tx_peer_bal_enable = 0,
        ol_tx_peer_bal_disable,
};

enum ol_tx_peer_bal_timer_state {
        ol_tx_peer_bal_timer_disable = 0,
        ol_tx_peer_bal_timer_active,
        ol_tx_peer_bal_timer_inactive,
};

struct ol_tx_limit_peer_t {
        u_int16_t limit_flag;
        u_int16_t peer_id;
        u_int16_t limit;
};

enum tx_peer_level {
        TXRX_IEEE11_B = 0,
        TXRX_IEEE11_A_G,
        TXRX_IEEE11_N,
        TXRX_IEEE11_AC,
        TXRX_IEEE11_MAX,
};

struct tx_peer_threshold {
        u_int32_t tput_thresh;
        u_int32_t tx_limit;
};
#endif


struct ol_tx_desc_t {
        qdf_nbuf_t netbuf;
        void *htt_tx_desc;
        uint16_t id;
        qdf_dma_addr_t htt_tx_desc_paddr;
        void *htt_frag_desc; /* struct msdu_ext_desc_t * */
        qdf_dma_addr_t htt_frag_desc_paddr;
        qdf_atomic_t ref_cnt;
        enum htt_tx_status status;

#ifdef QCA_COMPUTE_TX_DELAY
        uint32_t entry_timestamp_ticks;
#endif
        /*
         * Allow tx descriptors to be stored in (doubly-linked) lists.
         * This is mainly used for HL tx queuing and scheduling, but is
         * also used by LL+HL for batch processing of tx frames.
         */
        TAILQ_ENTRY(ol_tx_desc_t) tx_desc_list_elem;

        /*
         * Remember whether the tx frame is a regular packet, or whether
         * the driver added extra header fragments (e.g. a modified IP header
         * for TSO fragments, or an added LLC/SNAP header for audio
         * interworking data) that need to be handled in a special manner.
         * This field is filled in with the ol_tx_frm_type enum.
         */
        uint8_t pkt_type;
#if defined(CONFIG_HL_SUPPORT)
        struct ol_txrx_vdev_t *vdev;
#endif
        void *txq;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
        /* used by tx encap, to restore the os buf start offset
         * after tx complete
         */
        uint8_t orig_l2_hdr_bytes;
#endif

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
        struct ol_tx_flow_pool_t *pool;
#endif
        void *tso_desc;
};

typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;

union ol_tx_desc_list_elem_t {
        union ol_tx_desc_list_elem_t *next;
        struct ol_tx_desc_t tx_desc;
};

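/*
 * Illustrative sketch (an assumption, not part of the original header):
 * the union above implements an in-place freelist - while a descriptor
 * is free, its memory holds only the "next" link, so no separate list
 * node is allocated. A hypothetical pop operation:
 */
static inline struct ol_tx_desc_t *
ol_tx_desc_freelist_pop_sketch(union ol_tx_desc_list_elem_t **freelist)
{
        union ol_tx_desc_list_elem_t *elem = *freelist;

        if (!elem)
                return NULL; /* pool exhausted */
        *freelist = elem->next;
        return &elem->tx_desc;
}
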
union ol_txrx_align_mac_addr_t {
        uint8_t raw[OL_TXRX_MAC_ADDR_LEN];
        struct {
                uint16_t bytes_ab;
                uint16_t bytes_cd;
                uint16_t bytes_ef;
        } align2;
        struct {
                uint32_t bytes_abcd;
                uint16_t bytes_ef;
        } align4;
};

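/*
 * Illustrative sketch (an assumption, not part of the original header):
 * the point of the align2/align4 views above is to let a full 6-byte MAC
 * address comparison be done with two integer compares instead of six
 * byte compares. The helper name is hypothetical.
 */
static inline int
ol_txrx_mac_addr_is_equal_sketch(const union ol_txrx_align_mac_addr_t *a,
                                 const union ol_txrx_align_mac_addr_t *b)
{
        return a->align4.bytes_abcd == b->align4.bytes_abcd &&
               a->align4.bytes_ef == b->align4.bytes_ef;
}
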
struct ol_rx_reorder_timeout_list_elem_t {
        TAILQ_ENTRY(ol_rx_reorder_timeout_list_elem_t)
                reorder_timeout_list_elem;
        uint32_t timestamp_ms;
        struct ol_txrx_peer_t *peer;
        uint8_t tid;
        uint8_t active;
};

#define TXRX_TID_TO_WMM_AC(_tid) ( \
        (((_tid) >> 1) == 3) ? TXRX_WMM_AC_VO : \
        (((_tid) >> 1) == 2) ? TXRX_WMM_AC_VI : \
        (((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
        TXRX_WMM_AC_BE)

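/*
 * Worked example of the macro above: it reproduces the standard 802.1d
 * UP-to-AC mapping, i.e. TIDs 0 and 3 -> TXRX_WMM_AC_BE, TIDs 1 and 2 ->
 * TXRX_WMM_AC_BK, TIDs 4 and 5 -> TXRX_WMM_AC_VI, and TIDs 6 and 7 ->
 * TXRX_WMM_AC_VO.
 */
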
enum {
        OL_TX_SCHED_WRR_ADV_CAT_BE,
        OL_TX_SCHED_WRR_ADV_CAT_BK,
        OL_TX_SCHED_WRR_ADV_CAT_VI,
        OL_TX_SCHED_WRR_ADV_CAT_VO,
        OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA,
        OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT,
        OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA,
        OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT,

        OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES /* must be last */
};

struct ol_tx_reorder_cat_timeout_t {
        TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
        qdf_timer_t timer;
        uint32_t duration_ms;
        struct ol_txrx_pdev_t *pdev;
};

enum ol_tx_scheduler_status {
        ol_tx_scheduler_idle = 0,
        ol_tx_scheduler_running,
};

enum ol_tx_queue_status {
        ol_tx_queue_empty = 0,
        ol_tx_queue_active,
        ol_tx_queue_paused,
};

struct ol_txrx_msdu_info_t {
        struct htt_msdu_info_t htt;
        struct ol_txrx_peer_t *peer;
        struct qdf_tso_info_t tso_info;
};

enum {
        ol_tx_aggr_untried = 0,
        ol_tx_aggr_enabled,
        ol_tx_aggr_disabled,
        ol_tx_aggr_retry,
        ol_tx_aggr_in_progress,
};

#define OL_TX_MAX_GROUPS_PER_QUEUE 1
#define OL_TX_MAX_VDEV_ID 16
#define OL_TXQ_GROUP_VDEV_ID_MASK_GET(_membership) \
        (((_membership) & 0xffff0000) >> 16)
#define OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(_mask, _vdev_id) \
        (((_mask) >> (_vdev_id)) & 0x01)
#define OL_TXQ_GROUP_AC_MASK_GET(_membership) \
        ((_membership) & 0x0000ffff)
#define OL_TXQ_GROUP_AC_BIT_MASK_GET(_mask, _ac_mask) \
        (((_mask) >> (_ac_mask)) & 0x01)
#define OL_TXQ_GROUP_MEMBERSHIP_GET(_vdev_mask, _ac_mask) \
        (((_vdev_mask) << 16) | (_ac_mask))

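/*
 * Worked example (illustrative) of the membership macros above: a group
 * containing vdevs 0 and 2 (vdev mask 0x0005) for access categories 0
 * and 1 (AC mask 0x0003) has
 *   OL_TXQ_GROUP_MEMBERSHIP_GET(0x0005, 0x0003)  == 0x00050003
 * and the encoding can be queried back with
 *   OL_TXQ_GROUP_VDEV_ID_MASK_GET(0x00050003)    == 0x0005
 *   OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(0x0005, 2) == 1
 *   OL_TXQ_GROUP_AC_MASK_GET(0x00050003)         == 0x0003
 */
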
struct ol_tx_frms_queue_t {
        /* list_elem -
         * Allow individual tx frame queues to be linked together into
         * scheduler queues of tx frame queues
         */
        TAILQ_ENTRY(ol_tx_frms_queue_t) list_elem;
        uint8_t aggr_state;
        struct {
                uint8_t total;
                /* pause requested by ctrl SW rather than txrx SW */
                uint8_t by_ctrl;
        } paused_count;
        uint8_t ext_tid;
        uint16_t frms;
        uint32_t bytes;
        ol_tx_desc_list head;
        enum ol_tx_queue_status flag;
        struct ol_tx_queue_group_t *group_ptrs[OL_TX_MAX_GROUPS_PER_QUEUE];
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
        struct ol_txrx_peer_t *peer;
#endif
};

enum {
        ol_tx_log_entry_type_invalid,
        ol_tx_log_entry_type_queue_state,
        ol_tx_log_entry_type_enqueue,
        ol_tx_log_entry_type_dequeue,
        ol_tx_log_entry_type_drop,
        ol_tx_log_entry_type_queue_free,

        ol_tx_log_entry_type_wrap,
};

struct ol_tx_log_queue_state_var_sz_t {
        uint32_t active_bitmap;
        uint16_t credit;
        uint8_t num_cats_active;
        uint8_t data[1];
};

struct ol_tx_log_queue_add_t {
        uint8_t num_frms;
        uint8_t tid;
        uint16_t peer_id;
        uint16_t num_bytes;
};

struct ol_mac_addr {
        uint8_t mac_addr[OL_TXRX_MAC_ADDR_LEN];
};

struct ol_tx_sched_t;

#ifndef ol_txrx_local_peer_id_t
#define ol_txrx_local_peer_id_t uint8_t /* default */
#endif

#ifdef QCA_COMPUTE_TX_DELAY
/*
 * Delay histogram bins: 16 bins of 10 ms each to count delays
 * from 0-160 ms, plus one overflow bin for delays > 160 ms.
 */
#define QCA_TX_DELAY_HIST_INTERNAL_BINS 17
#define QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS 10

struct ol_tx_delay_data {
        struct {
                uint64_t transmit_sum_ticks;
                uint64_t queue_sum_ticks;
                uint32_t transmit_num;
                uint32_t queue_num;
        } avgs;
        uint16_t hist_bins_queue[QCA_TX_DELAY_HIST_INTERNAL_BINS];
};

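/*
 * Minimal sketch (an assumption, not part of the original header) of how
 * a delay value would be binned into hist_bins_queue above: bins 0-15
 * cover 0-159 ms in 10 ms steps, and bin 16 catches everything longer.
 */
static inline int ol_tx_delay_hist_bin_sketch(uint32_t delay_ms)
{
        int bin = delay_ms / QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS;

        if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
                bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1; /* overflow bin */
        return bin;
}
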
#endif /* QCA_COMPUTE_TX_DELAY */

/* Thermal Mitigation */
enum throttle_phase {
        THROTTLE_PHASE_OFF,
        THROTTLE_PHASE_ON,
        /* Invalid */
        THROTTLE_PHASE_MAX,
};

#define THROTTLE_TX_THRESHOLD (100)

typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt);

struct ol_tx_queue_group_t {
        qdf_atomic_t credit;
        u_int32_t membership;
};
#define OL_TX_MAX_TXQ_GROUPS 2

#define OL_TX_GROUP_STATS_LOG_SIZE 128
struct ol_tx_group_credit_stats_t {
        struct {
                struct {
                        u_int16_t member_vdevs;
                        u_int16_t credit;
                } grp[OL_TX_MAX_TXQ_GROUPS];
        } stats[OL_TX_GROUP_STATS_LOG_SIZE];
        u_int16_t last_valid_index;
        u_int16_t wrap_around;
};

#ifdef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED: pool is active (can take/put descriptors)
 *                             and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *                           and network queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
        FLOW_POOL_ACTIVE_UNPAUSED = 0,
        FLOW_POOL_ACTIVE_PAUSED = 1,
        FLOW_POOL_INVALID = 2,
        FLOW_POOL_INACTIVE = 3,
};

/**
 * struct ol_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 */
struct ol_txrx_pool_stats {
        uint16_t pool_map_count;
        uint16_t pool_unmap_count;
        uint16_t pkt_drop_no_pool;
};

/**
 * struct ol_tx_flow_pool_t - flow_pool info
 * @flow_pool_list_elem: flow_pool_list element
 * @flow_pool_lock: flow_pool lock
 * @flow_pool_id: flow_pool id
 * @flow_pool_size: flow_pool size
 * @avail_desc: available descriptors
 * @deficient_desc: deficient descriptors
 * @status: flow pool status
 * @flow_type: flow pool type
 * @member_flow_id: member flow id
 * @stop_th: stop threshold
 * @start_th: start threshold
 * @freelist: tx descriptor freelist
 * @pkt_drop_no_desc: drop due to no descriptors
 */
struct ol_tx_flow_pool_t {
        TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
        qdf_spinlock_t flow_pool_lock;
        uint8_t flow_pool_id;
        uint16_t flow_pool_size;
        uint16_t avail_desc;
        uint16_t deficient_desc;
        enum flow_pool_status status;
        enum htt_flow_type flow_type;
        uint8_t member_flow_id;
        uint16_t stop_th;
        uint16_t start_th;
        union ol_tx_desc_list_elem_t *freelist;
        uint16_t pkt_drop_no_desc;
};

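/*
 * Illustrative sketch (an assumption, not part of the original header)
 * of how the stop/start thresholds above are typically used: pause the
 * OS tx queues when the pool drains to stop_th, and resume them once it
 * refills to start_th, e.g.
 *
 *      if (pool->avail_desc <= pool->stop_th &&
 *          pool->status == FLOW_POOL_ACTIVE_UNPAUSED) {
 *              pool->status = FLOW_POOL_ACTIVE_PAUSED;
 *              (pause the OS tx queues of the member vdev)
 *      } else if (pool->avail_desc >= pool->start_th &&
 *                 pool->status == FLOW_POOL_ACTIVE_PAUSED) {
 *              pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
 *              (resume the OS tx queues of the member vdev)
 *      }
 */
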
#endif

/*
 * struct ol_txrx_peer_id_map - Map of firmware peer_ids to peers on host
 * @peer: Pointer to peer object
 * @peer_id_ref_cnt: No. of firmware references to the peer_id
 * @del_peer_id_ref_cnt: No. of outstanding unmap events for peer_id
 *                       after the peer object is deleted on the host.
 *
 * peer_id is used as an index into the array of ol_txrx_peer_id_map.
 */
struct ol_txrx_peer_id_map {
        struct ol_txrx_peer_t *peer;
        qdf_atomic_t peer_id_ref_cnt;
        qdf_atomic_t del_peer_id_ref_cnt;
};

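/*
 * Minimal sketch (an assumption, not part of the original header): since
 * peer_id indexes directly into the map array, a lookup is O(1); callers
 * are expected to hold pdev->peer_ref_mutex and bump the peer's ref_cnt
 * before using the result. The helper name is hypothetical.
 */
static inline struct ol_txrx_peer_t *
ol_txrx_peer_find_by_id_sketch(struct ol_txrx_peer_id_map *map,
                               uint16_t peer_id)
{
        return map[peer_id].peer; /* NULL if the ID is currently unmapped */
}
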
/*
 * As depicted in the diagram below, the pdev contains an array of
 * NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
 * Each element identifies all the tx queues that are active for
 * the TID, from the different peers.
 *
 * Each peer contains an array of NUM_EXT_TID ol_tx_frms_queue_t elements.
 * Each element identifies the tx frames for the TID that need to be sent
 * to the peer.
 *
 *
 * pdev: ol_tx_active_queues_in_tid_t active_in_tids[NUM_EXT_TIDS]
 *                TID
 *        0            1            2                     17
 *  +============+============+============+==    ==+============+
 *  | active (y) | active (n) | active (n) |        | active (y) |
 *  |------------+------------+------------+--    --+------------|
 *  |   queues   |   queues   |   queues   |        |   queues   |
 *  +============+============+============+==    ==+============+
 *         |                                               |
 *      .--+-----------------------------------------------'
 *      |  |
 *      |  |   peer X:                          peer Y:
 *      |  |   ol_tx_frms_queue_t               ol_tx_frms_queue_t
 *      |  |   tx_queues[NUM_EXT_TIDS]          tx_queues[NUM_EXT_TIDS]
 *      |  | TID +======+                   TID +======+
 *      |  `---->| next |---------------------->| next |--X
 *      |      0 | prev |  .------.  .------.  0 | prev |  .------.
 *      |        | txq  |->|txdesc|->|txdesc|    | txq  |->|txdesc|
 *      |        +======+  `------'  `------'    +======+  `------'
 *      |        | next |      |         |       | next |      |
 *      |      1 | prev |      v         v     1 | prev |      v
 *      |        | txq  |  .------.  .------.    | txq  |  .------.
 *      |        +======+  |netbuf|  |netbuf|    +======+  |netbuf|
 *      |        | next |  `------'  `------'    | next |  `------'
 *      |      2 | prev |                      2 | prev |
 *      |        | txq  |                        | txq  |
 *      |        +======+                        +======+
 *      |        |  |  |                         |  |  |
 *      |
 *      |
 *      |        |  |  |                         |  |  |
 *      |        +======+                        +======+
 *      `------->| next |--X                     | next |
 *            17 | prev |  .------.           17 | prev |
 *               | txq  |->|txdesc|              | txq  |
 *               +======+  `------'              +======+
 *                             |
 *                             v
 *                         .------.
 *                         |netbuf|
 *                         `------'
 */
struct ol_txrx_pdev_t {
        /* ctrl_pdev - handle for querying config info */
        ol_pdev_handle ctrl_pdev;

        /* osdev - handle for mem alloc / free, map / unmap */
        qdf_device_t osdev;

        htt_pdev_handle htt_pdev;

#ifdef WLAN_FEATURE_FASTPATH
        struct CE_handle *ce_tx_hdl; /* Handle to Tx packet posting CE */
        struct CE_handle *ce_htt_msg_hdl; /* Handle to TxRx completion CE */
#endif /* WLAN_FEATURE_FASTPATH */

        struct {
                int is_high_latency;
                int host_addba;
                int ll_pause_txq_limit;
                int default_tx_comp_req;
        } cfg;

        /* WDI subscriber's event list */
        wdi_event_subscribe **wdi_event_list;

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
        bool pkt_log_init;
        /* Pktlog pdev */
        struct ol_pktlog_dev_t *pl_dev;
#endif /* !REMOVE_PKT_LOG && !QVIT */

        enum ol_sec_type sec_types[htt_num_sec_types];
        /* standard frame type */
        enum wlan_frm_fmt frame_format;
        enum htt_pkt_type htt_pkt_type;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
        /* txrx encap/decap */
        uint8_t sw_tx_encap;
        uint8_t sw_rx_decap;
        uint8_t target_tx_tran_caps;
        uint8_t target_rx_tran_caps;
        /* llc process */
        uint8_t sw_tx_llc_proc_enable;
        uint8_t sw_rx_llc_proc_enable;
        /* A-MSDU */
        uint8_t sw_subfrm_hdr_recovery_enable;
        /* Protected Frame bit handling */
        uint8_t sw_pf_proc_enable;
#endif
        /*
         * target tx credit -
         * not needed for LL, but used for HL download scheduler to keep
         * track of roughly how much space is available in the target for
         * tx frames
         */
        qdf_atomic_t target_tx_credit;
        qdf_atomic_t orig_target_tx_credit;

        /* Peer mac address to staid mapping */
        struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];

        /* ol_txrx_vdev list */
        TAILQ_HEAD(, ol_txrx_vdev_t) vdev_list;

        /* peer ID to peer object map (array of pointers to peer objects) */
        struct ol_txrx_peer_id_map *peer_id_to_obj_map;

        struct {
                unsigned mask;
                unsigned idx_bits;
                TAILQ_HEAD(, ol_txrx_peer_t) *bins;
        } peer_hash;

        /* rx specific processing */
        struct {
                struct {
                        TAILQ_HEAD(, ol_rx_reorder_t) waitlist;
                        uint32_t timeout_ms;
                } defrag;
                struct {
                        int defrag_timeout_check;
                        int dup_check;
                } flags;

                struct {
                        struct ol_tx_reorder_cat_timeout_t
                                access_cats[TXRX_NUM_WMM_AC];
                } reorder_timeout;
                qdf_spinlock_t mutex;
        } rx;

        /* rx proc function */
        void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
                            struct ol_txrx_peer_t *peer,
                            unsigned tid, qdf_nbuf_t msdu_list);

        /* tx data delivery notification callback function */
        struct {
                ol_txrx_data_tx_cb func;
                void *ctxt;
        } tx_data_callback;

        /* tx management delivery notification callback functions */
        struct {
                struct {
                        ol_txrx_mgmt_tx_cb download_cb;
                        ol_txrx_mgmt_tx_cb ota_ack_cb;
                        void *ctxt;
                } callbacks[OL_TXRX_MGMT_NUM_TYPES];
        } tx_mgmt;

        struct {
                uint16_t pool_size;
                uint16_t num_free;
                union ol_tx_desc_list_elem_t *array;
                union ol_tx_desc_list_elem_t *freelist;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
                uint8_t num_invalid_bin;
                qdf_spinlock_t flow_pool_list_lock;
                TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
#endif
                uint32_t page_size;
                uint16_t desc_reserved_size;
                uint8_t page_divider;
                uint32_t offset_filter;
                struct qdf_mem_multi_page_t desc_pages;
#ifdef DESC_DUP_DETECT_DEBUG
                unsigned long *free_list_bitmap;
#endif
        } tx_desc;

        uint8_t is_mgmt_over_wmi_enabled;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2)
        struct ol_txrx_pool_stats pool_stats;
        uint32_t num_msdu_desc;
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
        struct ol_tx_flow_pool_t *mgmt_pool;
#endif
#endif

        struct {
                int (*cmp)(union htt_rx_pn_t *new,
                           union htt_rx_pn_t *old,
                           int is_unicast, int opmode);
                int len;
        } rx_pn[htt_num_sec_types];

        /* tx mutex */
        OL_TX_MUTEX_TYPE tx_mutex;

        /*
         * peer ref mutex:
         * 1. Protect peer object lookups until the returned peer object's
         *    reference count is incremented.
         * 2. Provide mutex when accessing peer object lookup structures.
         */
        OL_RX_MUTEX_TYPE peer_ref_mutex;

        /*
         * last_real_peer_mutex:
         * Protect lookups of any vdev's last_real_peer pointer until the
         * reference count for the pointed-to peer object is incremented.
         * This mutex could be in the vdev struct, but it's slightly simpler
         * to have a single lock in the pdev struct. Since the lock is only
         * held for an extremely short time, and since it's very unlikely for
         * two vdevs to concurrently access the lock, there's no real
         * benefit to having a per-vdev lock.
         */
        OL_RX_MUTEX_TYPE last_real_peer_mutex;

        qdf_spinlock_t peer_map_unmap_lock;

        struct {
                struct {
                        struct {
                                struct {
                                        uint64_t ppdus;
                                        uint64_t mpdus;
                                } normal;
                                struct {
                                        /*
                                         * mpdu_bad is general -
                                         * replace it with the specific
                                         * counters below
                                         */
                                        uint64_t mpdu_bad;
                                        /* uint64_t mpdu_fcs; */
                                        /* uint64_t mpdu_duplicate; */
                                        /* uint64_t mpdu_pn_replay; */
                                        /* uint64_t mpdu_bad_sender; */
                                        /* ^ comment: peer not found */
                                        /* uint64_t mpdu_flushed; */
                                        /* uint64_t msdu_defrag_mic_err; */
                                        uint64_t msdu_mc_dup_drop;
                                } err;
                        } rx;
                } priv;
                struct ol_txrx_stats pub;
        } stats;

#if defined(ENABLE_RX_REORDER_TRACE)
        struct {
                uint32_t mask;
                uint32_t idx;
                uint64_t cnt;
#define TXRX_RX_REORDER_TRACE_SIZE_LOG2 8 /* 256 entries */
                struct {
                        uint16_t reorder_idx;
                        uint16_t seq_num;
                        uint8_t num_mpdus;
                        uint8_t tid;
                } *data;
        } rx_reorder_trace;
#endif /* ENABLE_RX_REORDER_TRACE */

#if defined(ENABLE_RX_PN_TRACE)
        struct {
                uint32_t mask;
                uint32_t idx;
                uint64_t cnt;
#define TXRX_RX_PN_TRACE_SIZE_LOG2 5 /* 32 entries */
                struct {
                        struct ol_txrx_peer_t *peer;
                        uint32_t pn32;
                        uint16_t seq_num;
                        uint8_t unicast;
                        uint8_t tid;
                } *data;
        } rx_pn_trace;
#endif /* ENABLE_RX_PN_TRACE */

#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
        bool host_80211_enable;
#endif

        /*
         * tx_sched only applies for HL, but is defined unconditionally
         * rather than only if defined(CONFIG_HL_SUPPORT). This is because
         * the struct only occupies a few bytes, and because unconditional
         * definition avoids wrapping every reference to the struct members
         * in "defined(CONFIG_HL_SUPPORT)" conditional compilation.
         * If this struct gets expanded to a non-trivial size, then it
         * should be conditionally compiled to only apply if
         * defined(CONFIG_HL_SUPPORT).
         */
        qdf_spinlock_t tx_queue_spinlock;
        struct {
                enum ol_tx_scheduler_status tx_sched_status;
                struct ol_tx_sched_t *scheduler;
                struct ol_tx_frms_queue_t *last_used_txq;
        } tx_sched;
        /*
         * tx_queue only applies for HL, but is defined unconditionally to
         * avoid wrapping references to tx_queue in
         * "defined(CONFIG_HL_SUPPORT)" conditional compilation.
         */
        struct {
                qdf_atomic_t rsrc_cnt;
                /* threshold_lo - when to start tx desc margin replenishment */
                uint16_t rsrc_threshold_lo;
                /* threshold_hi - where to stop during tx desc margin
                 * replenishment
                 */
                uint16_t rsrc_threshold_hi;
        } tx_queue;

#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
#define OL_TXQ_LOG_SIZE 512
        qdf_spinlock_t txq_log_spinlock;
        struct {
                int size;
                int oldest_record_offset;
                int offset;
                int allow_wrap;
                u_int32_t wrapped;
                /* aligned to u_int32_t boundary */
                u_int8_t data[OL_TXQ_LOG_SIZE];
        } txq_log;
#endif

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
        qdf_spinlock_t peer_stat_mutex;
#endif

        int rssi_update_shift;
        int rssi_new_weight;
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
        struct {
                ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
                ol_txrx_local_peer_id_t freelist;
                qdf_spinlock_t lock;
                ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
        } local_peer_ids;
#endif

#ifdef QCA_COMPUTE_TX_DELAY
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
#define QCA_TX_DELAY_NUM_CATEGORIES \
        (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
#else
#define QCA_TX_DELAY_NUM_CATEGORIES 1
#endif
        struct {
                qdf_spinlock_t mutex;
                struct {
                        struct ol_tx_delay_data copies[2]; /* ping-pong */
                        int in_progress_idx;
                        uint32_t avg_start_time_ticks;
                } cats[QCA_TX_DELAY_NUM_CATEGORIES];
                uint32_t tx_compl_timestamp_ticks;
                uint32_t avg_period_ticks;
                uint32_t hist_internal_bin_width_mult;
                uint32_t hist_internal_bin_width_shift;
        } tx_delay;

        uint16_t packet_count[QCA_TX_DELAY_NUM_CATEGORIES];
        uint16_t packet_loss_count[QCA_TX_DELAY_NUM_CATEGORIES];

#endif /* QCA_COMPUTE_TX_DELAY */

        struct {
                qdf_spinlock_t mutex;
                /* timer used to monitor the throttle "on" phase and
                 * "off" phase
                 */
                qdf_timer_t phase_timer;
                /* timer used to send tx frames */
                qdf_timer_t tx_timer;
                /* This is the time in ms of the throttling window; it
                 * includes an "on" phase and an "off" phase
                 */
                uint32_t throttle_period_ms;
                /* Current throttle level set by the client, e.g. level 0,
                 * level 1, etc.
                 */
                enum throttle_level current_throttle_level;
                /* Index that points to the phase within the throttle period */
                enum throttle_phase current_throttle_phase;
                /* Maximum number of frames to send to the target at one time */
                uint32_t tx_threshold;
                /* stores time in ms of on/off phase for each throttle level */
                int throttle_time_ms[THROTTLE_LEVEL_MAX][THROTTLE_PHASE_MAX];
                /* mark true if traffic is paused due to thermal throttling */
                bool is_paused;
        } tx_throttle;

#ifdef IPA_OFFLOAD
        ipa_uc_op_cb_type ipa_uc_op_cb;
        void *osif_dev;
#endif /* IPA_OFFLOAD */

#if defined(FEATURE_TSO)
        struct {
                uint16_t pool_size;
                uint16_t num_free;
                struct qdf_tso_seg_elem_t *freelist;
                /* tso mutex */
                OL_TX_MUTEX_TYPE tso_mutex;
        } tso_seg_pool;
#endif

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
        struct {
                enum ol_tx_peer_bal_state enabled;
                qdf_spinlock_t mutex;
                /* timer used to trigger more frames for bad peers */
                qdf_timer_t peer_bal_timer;
                /* This is the time in ms of the peer balance timer period */
                u_int32_t peer_bal_period_ms;
                /* This is the txq limit */
                u_int32_t peer_bal_txq_limit;
                /* This is the state of the peer balance timer */
                enum ol_tx_peer_bal_timer_state peer_bal_timer_state;
                /* This is the counter of active peers which are under
                 * tx flow control
                 */
                u_int32_t peer_num;
                /* This is the list of peers which are under tx flow control */
                struct ol_tx_limit_peer_t limit_list[MAX_NO_PEERS_IN_LIMIT];
                /* This is the threshold configuration */
                struct tx_peer_threshold ctl_thresh[TXRX_IEEE11_MAX];
        } tx_peer_bal;
#endif /* CONFIG_HL_SUPPORT && QCA_BAD_PEER_TX_FLOW_CL */

        struct ol_tx_queue_group_t txq_grps[OL_TX_MAX_TXQ_GROUPS];
#ifdef DEBUG_HL_LOGGING
        qdf_spinlock_t grp_stat_spinlock;
        struct ol_tx_group_credit_stats_t grp_stats;
#endif
        int tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES];
        uint8_t ocb_peer_valid;
        struct ol_txrx_peer_t *ocb_peer;
        ol_tx_pause_callback_fp pause_cb;

        struct {
                void (*lro_flush_cb)(void *);
                qdf_atomic_t lro_dev_cnt;
        } lro_info;
        struct ol_txrx_peer_t *self_peer;
};

struct ol_txrx_vdev_t {
        struct ol_txrx_pdev_t *pdev; /* pdev - the physical device that is
                                        the parent of this virtual device */
        uint8_t vdev_id; /* ID used to specify a particular vdev
                            to the target */
        void *osif_dev;
        union ol_txrx_align_mac_addr_t mac_addr; /* MAC address */
        /* tx paused - NO LONGER NEEDED? */
        TAILQ_ENTRY(ol_txrx_vdev_t) vdev_list_elem; /* node in the pdev's list
                                                       of vdevs */
        TAILQ_HEAD(peer_list_t, ol_txrx_peer_t) peer_list;
        struct ol_txrx_peer_t *last_real_peer; /* last real peer created for
                                                  this vdev (not "self"
                                                  pseudo-peer) */
        ol_txrx_rx_fp rx; /* receive function used by this vdev */

        struct {
                /*
                 * If the vdev object couldn't be deleted immediately because
                 * it still had some peer objects left, remember that a delete
                 * was requested, so it can be deleted once all its peers have
                 * been deleted.
                 */
                int pending;
                /*
                 * Store a function pointer and a context argument to provide a
                 * notification for when the vdev is deleted.
                 */
                ol_txrx_vdev_delete_cb callback;
                void *context;
        } delete;

        /* safe mode control to bypass the encrypt and decipher process */
        uint32_t safemode;

        /* rx filter related */
        uint32_t drop_unenc;
        struct privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
        uint32_t num_filters;

        enum wlan_op_mode opmode;

#ifdef QCA_IBSS_SUPPORT
        /* ibss mode related */
        int16_t ibss_peer_num; /* the number of active peers */
        int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
#endif

#if defined(CONFIG_HL_SUPPORT)
        struct ol_tx_frms_queue_t txqs[OL_TX_VDEV_NUM_QUEUES];
#endif

        struct {
                struct {
                        qdf_nbuf_t head;
                        qdf_nbuf_t tail;
                        int depth;
                } txq;
                uint32_t paused_reason;
                qdf_spinlock_t mutex;
                qdf_timer_t timer;
                int max_q_depth;
                bool is_q_paused;
                bool is_q_timer_on;
                uint32_t q_pause_cnt;
                uint32_t q_unpause_cnt;
                uint32_t q_overflow_cnt;
        } ll_pause;
        bool disable_intrabss_fwd;
        qdf_atomic_t os_q_paused;
        uint16_t tx_fl_lwm;
        uint16_t tx_fl_hwm;
        qdf_spinlock_t flow_control_lock;
        ol_txrx_tx_flow_control_fp osif_flow_control_cb;
        void *osif_fc_ctx;

#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
        union ol_txrx_align_mac_addr_t hl_tdls_ap_mac_addr;
        bool hlTdlsFlag;
#endif

#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
        qdf_atomic_t tx_desc_count;
#endif

        uint16_t wait_on_peer_id;
        qdf_event_t wait_delete_comp;
#if defined(FEATURE_TSO)
        struct {
                int pool_elems; /* total number of elements in the pool */
                int alloc_cnt; /* number of allocated elements */
                uint32_t *freelist; /* free list of qdf_tso_seg_elem_t */
        } tso_pool_t;
#endif

        /* last channel change event received */
        struct {
                bool is_valid; /* whether the rest of the members are valid */
                uint16_t mhz;
                uint16_t band_center_freq1;
                uint16_t band_center_freq2;
                WLAN_PHY_MODE phy_mode;
        } ocb_channel_event;

        /* Information about the OCB channel schedules */
        struct ol_txrx_ocb_chan_info *ocb_channel_info;
        uint32_t ocb_channel_count;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
        struct ol_tx_flow_pool_t *pool;
#endif
        /* intra bss forwarded tx and rx packets count */
        uint64_t fwd_tx_packets;
        uint64_t fwd_rx_packets;
        bool is_wisa_mode_enable;
        uint8_t mac_id;
};

struct ol_rx_reorder_array_elem_t {
        qdf_nbuf_t head;
        qdf_nbuf_t tail;
};

struct ol_rx_reorder_t {
        uint8_t win_sz;
        uint8_t win_sz_mask;
        uint8_t num_mpdus;
        struct ol_rx_reorder_array_elem_t *array;
        /* base - single rx reorder element used for non-aggr cases */
        struct ol_rx_reorder_array_elem_t base;
#if defined(QCA_SUPPORT_OL_RX_REORDER_TIMEOUT)
        struct ol_rx_reorder_timeout_list_elem_t timeout;
#endif
        /* only used for defrag right now */
        TAILQ_ENTRY(ol_rx_reorder_t) defrag_waitlist_elem;
        uint32_t defrag_timeout_ms;
        /* get back to parent ol_txrx_peer_t when ol_rx_reorder_t is in a
         * waitlist
         */
        uint16_t tid;
};

enum {
        txrx_sec_mcast = 0,
        txrx_sec_ucast
};

typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
                                      tx_msdu_info);

#define OL_TXRX_PEER_SECURITY_MULTICAST 0
#define OL_TXRX_PEER_SECURITY_UNICAST 1
#define OL_TXRX_PEER_SECURITY_MAX 2

struct ol_txrx_peer_t {
        struct ol_txrx_vdev_t *vdev;

        qdf_atomic_t ref_cnt;
        qdf_atomic_t delete_in_progress;
        qdf_atomic_t flush_in_progress;

        /* The peer state tracking is used for HL systems
         * that don't support tx and rx filtering within the target.
         * In such systems, the peer's state determines what kind of
         * tx and rx filtering, if any, is done.
         * This variable doesn't apply to LL systems, or to HL systems for
         * which the target handles tx and rx filtering. However, it is
         * simplest to declare and update this variable unconditionally,
         * for all systems.
         */
        enum ol_txrx_peer_state state;
        qdf_spinlock_t peer_info_lock;
        qdf_spinlock_t bufq_lock;
        struct list_head cached_bufq;

        ol_tx_filter_func tx_filter;

        /* peer ID(s) for this peer */
        uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
        uint16_t local_id;
#endif

        union ol_txrx_align_mac_addr_t mac_addr;

        /* node in the vdev's list of peers */
        TAILQ_ENTRY(ol_txrx_peer_t) peer_list_elem;
        /* node in the hash table bin's list of peers */
        TAILQ_ENTRY(ol_txrx_peer_t) hash_list_elem;

        /*
         * per TID info -
         * stored in separate arrays to avoid alignment padding mem overhead
         */
        struct ol_rx_reorder_t tids_rx_reorder[OL_TXRX_NUM_EXT_TIDS];
        union htt_rx_pn_t tids_last_pn[OL_TXRX_NUM_EXT_TIDS];
        uint8_t tids_last_pn_valid[OL_TXRX_NUM_EXT_TIDS];
        uint16_t tids_next_rel_idx[OL_TXRX_NUM_EXT_TIDS];
        uint16_t tids_last_seq[OL_TXRX_NUM_EXT_TIDS];
        uint16_t tids_mcast_last_seq[OL_TXRX_NUM_EXT_TIDS];

        struct {
                enum htt_sec_type sec_type;
                uint32_t michael_key[2]; /* relevant for TKIP */
        } security[2]; /* 0 -> multicast, 1 -> unicast */

        /*
         * rx proc function: this either is a copy of pdev's rx_opt_proc for
         * regular rx processing, or has been redirected to a /dev/null discard
         * function when peer deletion is in progress.
         */
        void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
                            struct ol_txrx_peer_t *peer,
                            unsigned tid, qdf_nbuf_t msdu_list);

#if defined(CONFIG_HL_SUPPORT)
        struct ol_tx_frms_queue_t txqs[OL_TX_NUM_TIDS];
#endif

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
        ol_txrx_peer_stats_t stats;
#endif
        int16_t rssi_dbm;

        /* NAWDS Flag and Bss Peer bit */
        uint16_t nawds_enabled:1, bss_peer:1, valid:1;

        /* QoS info */
        uint8_t qos_capable;
        /* U-APSD tid mask */
        uint8_t uapsd_mask;
        /* flag indicating key installed */
        uint8_t keyinstalled;

        /* Bit to indicate if PN check is done in fw */
        qdf_atomic_t fw_pn_check;

#ifdef WLAN_FEATURE_11W
        /* PN counter for Robust Management Frames */
        uint64_t last_rmf_pn;
        uint32_t rmf_pn_replays;
        uint8_t last_rmf_pn_valid;
#endif

        /* Properties of the last received PPDU */
        int16_t last_pkt_rssi_cmb;
        int16_t last_pkt_rssi[4];
        uint8_t last_pkt_legacy_rate;
        uint8_t last_pkt_legacy_rate_sel;
        uint32_t last_pkt_timestamp_microsec;
        uint8_t last_pkt_timestamp_submicrosec;
        uint32_t last_pkt_tsf;
        uint8_t last_pkt_tid;
        uint16_t last_pkt_center_freq;
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
        u_int16_t tx_limit;
        u_int16_t tx_limit_flag;
        u_int16_t tx_pause_flag;
#endif
        qdf_time_t last_assoc_rcvd;
        qdf_time_t last_disassoc_rcvd;
        qdf_time_t last_deauth_rcvd;
};

struct ol_rx_remote_data {
        qdf_nbuf_t msdu;
        uint8_t mac_id;
};

#endif /* _OL_TXRX_TYPES__H_ */