/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file ol_txrx_types.h
 * @brief Define the major data types used internally by the host datapath SW.
 */
#ifndef _OL_TXRX_TYPES__H_
#define _OL_TXRX_TYPES__H_

#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_mem.h>
#include <cds_queue.h>          /* TAILQ */
#include <a_types.h>            /* A_UINT8 */
#include <htt.h>                /* htt_sec_type, htt_pkt_type, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_t */
#include <wdi_event_api.h>      /* wdi_event_subscribe */
#include <qdf_timer.h>          /* qdf_timer_t */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <pktlog.h>             /* ol_pktlog_dev_handle */
#include <ol_txrx_stats.h>
#include <txrx.h>
#include "ol_txrx_htt_api.h"
#include "ol_htt_tx_api.h"
#include "ol_htt_rx_api.h"
#include "ol_txrx_ctrl_api.h"   /* WLAN_MAX_STA_COUNT */
#include "ol_txrx_osif_api.h"   /* ol_rx_callback_fp */
/*
 * The target may allocate multiple IDs for a peer.
 * In particular, the target may allocate one ID to represent the
 * multicast key the peer uses, and another ID to represent the
 * unicast key the peer uses.
 */
#define MAX_NUM_PEER_ID_PER_PEER 8

#define OL_TXRX_MAC_ADDR_LEN 6

/* OL_TXRX_NUM_EXT_TIDS -
 * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
 */
#define OL_TXRX_NUM_EXT_TIDS 19

#define OL_TX_NUM_QOS_TIDS 16   /* 16 regular TIDs */
#define OL_TX_NON_QOS_TID 16
#define OL_TX_MGMT_TID 17
#define OL_TX_NUM_TIDS 18
#define OL_RX_MCAST_TID 18      /* Mcast TID only between f/w & host */

#define OL_TX_VDEV_MCAST_BCAST 0        /* HTT_TX_EXT_TID_MCAST_BCAST */
#define OL_TX_VDEV_DEFAULT_MGMT 1       /* HTT_TX_EXT_TID_DEFALT_MGMT */
#define OL_TX_VDEV_NUM_QUEUES 2

#define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
#define OL_TXRX_MGMT_NUM_TYPES 8

#define OL_TX_MUTEX_TYPE qdf_spinlock_t
#define OL_RX_MUTEX_TYPE qdf_spinlock_t

/* TXRX Histogram defines */
#define TXRX_DATA_HISTROGRAM_GRANULARITY 1000
#define TXRX_DATA_HISTROGRAM_NUM_INTERVALS 100

struct ol_txrx_pdev_t;
struct ol_txrx_vdev_t;
struct ol_txrx_peer_t;

/* rx filter related */
#define MAX_PRIVACY_FILTERS 4   /* max privacy filters */

enum privacy_filter {
	PRIVACY_FILTER_ALWAYS,
	PRIVACY_FILTER_KEY_UNAVAILABLE,
};

enum privacy_filter_packet_type {
	PRIVACY_FILTER_PACKET_UNICAST,
	PRIVACY_FILTER_PACKET_MULTICAST,
	PRIVACY_FILTER_PACKET_BOTH
};

struct privacy_exemption {
	/* ethertype -
	 * type of ethernet frames this filter applies to, in host byte order
	 */
	uint16_t ether_type;
	enum privacy_filter filter_type;
	enum privacy_filter_packet_type packet_type;
};
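
/*
 * Illustrative sketch (assumption, not part of the original header): a
 * typical privacy exemption lets EAPOL handshake frames (ethertype 0x888E)
 * through while the pairwise key is not yet installed. How such a filter
 * is installed is outside this file; the initializer below is only a sketch.
 */
#if 0 /* example only */
static const struct privacy_exemption example_eapol_exemption = {
	.ether_type = 0x888E, /* EAPOL, host byte order */
	.filter_type = PRIVACY_FILTER_KEY_UNAVAILABLE,
	.packet_type = PRIVACY_FILTER_PACKET_BOTH,
};
#endif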

enum ol_tx_frm_type {
	ol_tx_frm_std = 0, /* regular frame - no added header fragments */
	ol_tx_frm_tso,     /* TSO segment, with a modified IP header added */
	ol_tx_frm_audio,   /* audio frames, with a custom LLC/SNAP hdr added */
	ol_tx_frm_no_free, /* frame requires special tx completion callback */
};

struct ol_tx_desc_t {
	qdf_nbuf_t netbuf;
	void *htt_tx_desc;
	uint16_t id;
	qdf_dma_addr_t htt_tx_desc_paddr;
	void *htt_frag_desc; /* struct msdu_ext_desc_t * */
	qdf_dma_addr_t htt_frag_desc_paddr;
	qdf_atomic_t ref_cnt;
	enum htt_tx_status status;

#ifdef QCA_COMPUTE_TX_DELAY
	uint32_t entry_timestamp_ticks;
#endif
	/*
	 * Allow tx descriptors to be stored in (doubly-linked) lists.
	 * This is mainly used for HL tx queuing and scheduling, but is
	 * also used by LL+HL for batch processing of tx frames.
	 */
	TAILQ_ENTRY(ol_tx_desc_t) tx_desc_list_elem;

	/*
	 * Remember whether the tx frame is a regular packet, or whether
	 * the driver added extra header fragments (e.g. a modified IP header
	 * for TSO fragments, or an added LLC/SNAP header for audio interworking
	 * data) that need to be handled in a special manner.
	 * This field is filled in with the ol_tx_frm_type enum.
	 */
	uint8_t pkt_type;
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* used by tx encap, to restore the os buf start offset
	   after tx complete */
	uint8_t orig_l2_hdr_bytes;
#endif
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct ol_tx_flow_pool_t *pool;
#endif
	void *tso_desc;
};

typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;

union ol_tx_desc_list_elem_t {
	union ol_tx_desc_list_elem_t *next;
	struct ol_tx_desc_t tx_desc;
};
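
/*
 * Illustrative sketch (assumption, not part of the original header): the
 * union above overlays a free-list link on descriptor storage, so free
 * descriptors need no extra memory for list linkage. A minimal pop would
 * look roughly like this (the example_* helper name is hypothetical):
 */
#if 0 /* example only */
static inline struct ol_tx_desc_t *
example_tx_desc_pop(union ol_tx_desc_list_elem_t **freelist)
{
	union ol_tx_desc_list_elem_t *elem = *freelist;

	if (!elem)
		return NULL;            /* pool exhausted */
	*freelist = elem->next;         /* unlink from the free list */
	return &elem->tx_desc;          /* reuse the memory as a descriptor */
}
#endif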

union ol_txrx_align_mac_addr_t {
	uint8_t raw[OL_TXRX_MAC_ADDR_LEN];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
};
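
/*
 * Illustrative sketch (assumption): the aligned views above allow a MAC
 * address compare with two integer comparisons instead of a 6-byte memcmp,
 * provided both addresses are stored in this union type (the example_*
 * helper name is hypothetical):
 */
#if 0 /* example only */
static inline int
example_mac_addr_same(const union ol_txrx_align_mac_addr_t *a,
		      const union ol_txrx_align_mac_addr_t *b)
{
	return a->align4.bytes_abcd == b->align4.bytes_abcd &&
	       a->align4.bytes_ef == b->align4.bytes_ef;
}
#endif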

struct ol_rx_reorder_timeout_list_elem_t {
	TAILQ_ENTRY(ol_rx_reorder_timeout_list_elem_t)
		reorder_timeout_list_elem;
	uint32_t timestamp_ms;
	struct ol_txrx_peer_t *peer;
	uint8_t tid;
	uint8_t active;
};

#define TXRX_TID_TO_WMM_AC(_tid) ( \
	(((_tid) >> 1) == 3) ? TXRX_WMM_AC_VO : \
	(((_tid) >> 1) == 2) ? TXRX_WMM_AC_VI : \
	(((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
	TXRX_WMM_AC_BE)
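
/*
 * TXRX_TID_TO_WMM_AC reproduces the standard 802.1D UP -> WMM access
 * category mapping:
 * TIDs 0, 3 -> TXRX_WMM_AC_BE; TIDs 1, 2 -> TXRX_WMM_AC_BK;
 * TIDs 4, 5 -> TXRX_WMM_AC_VI; TIDs 6, 7 -> TXRX_WMM_AC_VO.
 */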

struct ol_tx_reorder_cat_timeout_t {
	TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
	qdf_timer_t timer;
	uint32_t duration_ms;
	struct ol_txrx_pdev_t *pdev;
};

enum ol_tx_queue_status {
	ol_tx_queue_empty = 0,
	ol_tx_queue_active,
	ol_tx_queue_paused,
};

struct ol_txrx_msdu_info_t {
	struct htt_msdu_info_t htt;
	struct ol_txrx_peer_t *peer;
	struct qdf_tso_info_t tso_info;
};

enum {
	ol_tx_aggr_untried = 0,
	ol_tx_aggr_enabled,
	ol_tx_aggr_disabled,
	ol_tx_aggr_retry,
	ol_tx_aggr_in_progress,
};

struct ol_tx_frms_queue_t {
	/* list_elem -
	 * Allow individual tx frame queues to be linked together into
	 * scheduler queues of tx frame queues
	 */
	TAILQ_ENTRY(ol_tx_frms_queue_t) list_elem;
	uint8_t aggr_state;
	struct {
		uint8_t total;
		/* pause requested by ctrl SW rather than txrx SW */
		uint8_t by_ctrl;
	} paused_count;
	uint8_t ext_tid;
	uint16_t frms;
	uint32_t bytes;
	ol_tx_desc_list head;
	enum ol_tx_queue_status flag;
};

enum {
	ol_tx_log_entry_type_invalid,
	ol_tx_log_entry_type_queue_state,
	ol_tx_log_entry_type_enqueue,
	ol_tx_log_entry_type_dequeue,
	ol_tx_log_entry_type_drop,
	ol_tx_log_entry_type_queue_free,

	ol_tx_log_entry_type_wrap,
};

struct ol_tx_log_queue_state_var_sz_t {
	uint32_t active_bitmap;
	uint16_t credit;
	uint8_t num_cats_active;
	uint8_t data[1];
};

struct ol_tx_log_queue_add_t {
	uint8_t num_frms;
	uint8_t tid;
	uint16_t peer_id;
	uint16_t num_bytes;
};

struct ol_mac_addr {
	uint8_t mac_addr[OL_TXRX_MAC_ADDR_LEN];
};

#ifndef OL_TXRX_NUM_LOCAL_PEER_IDS
#define OL_TXRX_NUM_LOCAL_PEER_IDS 33   /* default */
#endif

#ifndef ol_txrx_local_peer_id_t
#define ol_txrx_local_peer_id_t uint8_t /* default */
#endif

#ifdef QCA_COMPUTE_TX_DELAY
/*
 * Delay histogram bins: 16 bins of 10 ms each to count delays
 * from 0-160 ms, plus one overflow bin for delays > 160 ms.
 */
#define QCA_TX_DELAY_HIST_INTERNAL_BINS 17
#define QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS 10

struct ol_tx_delay_data {
	struct {
		uint64_t transmit_sum_ticks;
		uint64_t queue_sum_ticks;
		uint32_t transmit_num;
		uint32_t queue_num;
	} avgs;
	uint16_t hist_bins_queue[QCA_TX_DELAY_HIST_INTERNAL_BINS];
};
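
/*
 * Illustrative sketch (assumption, not part of the original header): how a
 * measured queuing delay could be bucketed into hist_bins_queue. Delays
 * fall into 10 ms bins, with everything at or above 160 ms landing in the
 * final overflow bin. The example_* helper name is hypothetical.
 */
#if 0 /* example only */
static inline void
example_tx_delay_hist_update(struct ol_tx_delay_data *data, uint32_t delay_ms)
{
	uint32_t bin = delay_ms / QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS;

	if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
		bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1; /* overflow bin */
	data->hist_bins_queue[bin]++;
}
#endif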

#endif /* QCA_COMPUTE_TX_DELAY */

/* Thermal Mitigation */

enum throttle_level {
	THROTTLE_LEVEL_0,
	THROTTLE_LEVEL_1,
	THROTTLE_LEVEL_2,
	THROTTLE_LEVEL_3,
	/* Invalid */
	THROTTLE_LEVEL_MAX,
};

enum throttle_phase {
	THROTTLE_PHASE_OFF,
	THROTTLE_PHASE_ON,
	/* Invalid */
	THROTTLE_PHASE_MAX,
};

#define THROTTLE_TX_THRESHOLD (100)
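
/*
 * Illustrative sketch (assumption, not part of the original header):
 * thermal mitigation alternates between THROTTLE_PHASE_ON (tx allowed) and
 * THROTTLE_PHASE_OFF (tx paused) within each throttle period; the
 * per-level on/off durations live in throttle_time_ms[level][phase] of the
 * pdev's tx_throttle state below. The example_* helper is hypothetical.
 */
#if 0 /* example only */
static inline enum throttle_phase
example_next_throttle_phase(enum throttle_phase cur)
{
	return (cur == THROTTLE_PHASE_OFF) ?
		THROTTLE_PHASE_ON : THROTTLE_PHASE_OFF;
}
#endif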

typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED: pool is active (can take/put descriptors)
 *			       and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *			     and network queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_INVALID = 2,
	FLOW_POOL_INACTIVE = 3,
};

/**
 * struct ol_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 * @pkt_drop_no_desc: packets dropped due to unavailability of descriptors
 */
struct ol_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pkt_drop_no_pool;
	uint16_t pkt_drop_no_desc;
};

/**
 * struct ol_tx_flow_pool_t - flow_pool info
 * @flow_pool_list_elem: flow_pool_list element
 * @flow_pool_lock: flow_pool lock
 * @flow_pool_id: flow_pool id
 * @flow_pool_size: flow_pool size
 * @avail_desc: available descriptors
 * @deficient_desc: deficient descriptors
 * @status: flow pool status
 * @flow_type: flow pool type
 * @member_flow_id: member flow id
 * @stop_th: stop threshold
 * @start_th: start threshold
 * @freelist: tx descriptor freelist
 */
struct ol_tx_flow_pool_t {
	TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
	qdf_spinlock_t flow_pool_lock;
	uint8_t flow_pool_id;
	uint16_t flow_pool_size;
	uint16_t avail_desc;
	uint16_t deficient_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
	uint8_t member_flow_id;
	uint16_t stop_th;
	uint16_t start_th;
	union ol_tx_desc_list_elem_t *freelist;
};

#endif

/*
 * As depicted in the diagram below, the pdev contains an array of
 * NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
 * Each element identifies all the tx queues that are active for
 * the TID, from the different peers.
 *
 * Each peer contains an array of NUM_EXT_TID ol_tx_frms_queue_t elements.
 * Each element identifies the tx frames for the TID that need to be sent
 * to the peer.
 *
 *
 *  pdev: ol_tx_active_queues_in_tid_t active_in_tids[NUM_EXT_TIDS]
 *                                     TID
 *       0            1            2                          17
 *  +============+============+============+==          ==+============+
 *  | active (y) | active (n) | active (n) |              | active (y) |
 *  |------------+------------+------------+--          --+------------|
 *  | queues     | queues     | queues     |              | queues     |
 *  +============+============+============+==          ==+============+
 *       |                                                     |
 *    .--+-----------------------------------------------------'
 *    |  |
 *    |  |     peer X:                           peer Y:
 *    |  |     ol_tx_frms_queue_t                ol_tx_frms_queue_t
 *    |  |     tx_queues[NUM_EXT_TIDS]           tx_queues[NUM_EXT_TIDS]
 *    |  | TID +======+                      TID +======+
 *    |  `---->| next |------------------------->| next |--X
 *    |     0  | prev |   .------.   .------.  0 | prev |   .------.
 *    |        | txq  |-->|txdesc|-->|txdesc|    | txq  |-->|txdesc|
 *    |        +======+   `------'   `------'    +======+   `------'
 *    |        | next |      |          |      1 | next |      |
 *    |     1  | prev |      v          v        | prev |      v
 *    |        | txq  |   .------.   .------.    | txq  |   .------.
 *    |        +======+   |netbuf|   |netbuf|    +======+   |netbuf|
 *    |        | next |   `------'   `------'    | next |   `------'
 *    |     2  | prev |                        2 | prev |
 *    |        | txq  |                          | txq  |
 *    |        +======+                          +======+
 *    |        |      |                          |      |
 *    |
 *    |
 *    |        |      |                          |      |
 *    |        +======+                          +======+
 *    `------->| next |--X                       | next |
 *          17 | prev |   .------.            17 | prev |
 *             | txq  |-->|txdesc|               | txq  |
 *             +======+   `------'               +======+
 *                           |
 *                           v
 *                        .------.
 *                        |netbuf|
 *                        `------'
 */
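
/*
 * Illustrative sketch (assumption, not part of the original header):
 * walking the per-TID tx frame queue shown above. A scheduler-style pass
 * can traverse the descriptor list anchored at ol_tx_frms_queue_t::head;
 * the example_* helper is hypothetical and assumes the TAILQ macros from
 * cds_queue.h.
 */
#if 0 /* example only */
static inline uint16_t
example_txq_count_frames(struct ol_tx_frms_queue_t *txq)
{
	struct ol_tx_desc_t *tx_desc;
	uint16_t count = 0;

	TAILQ_FOREACH(tx_desc, &txq->head, tx_desc_list_elem)
		count++; /* expected to match txq->frms */
	return count;
}
#endif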
struct ol_txrx_pdev_t {
	/* ctrl_pdev - handle for querying config info */
	ol_pdev_handle ctrl_pdev;

	/* osdev - handle for mem alloc / free, map / unmap */
	qdf_device_t osdev;

	htt_pdev_handle htt_pdev;

#ifdef WLAN_FEATURE_FASTPATH
	struct CE_handle *ce_tx_hdl; /* Handle to Tx packet posting CE */
	struct CE_handle *ce_htt_msg_hdl; /* Handle to TxRx completion CE */
#endif /* WLAN_FEATURE_FASTPATH */

	struct {
		int is_high_latency;
		int host_addba;
		int ll_pause_txq_limit;
		int default_tx_comp_req;
	} cfg;

	/* WDI subscriber's event list */
	wdi_event_subscribe **wdi_event_list;

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
	bool pkt_log_init;
	/* Pktlog pdev */
	struct ol_pktlog_dev_t *pl_dev;
#endif /* !defined(REMOVE_PKT_LOG) && !defined(QVIT) */

	enum ol_sec_type sec_types[htt_num_sec_types];
	/* standard frame type */
	enum wlan_frm_fmt frame_format;
	enum htt_pkt_type htt_pkt_type;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* txrx encap/decap */
	uint8_t sw_tx_encap;
	uint8_t sw_rx_decap;
	uint8_t target_tx_tran_caps;
	uint8_t target_rx_tran_caps;
	/* llc process */
	uint8_t sw_tx_llc_proc_enable;
	uint8_t sw_rx_llc_proc_enable;
	/* A-MSDU */
	uint8_t sw_subfrm_hdr_recovery_enable;
	/* Protected Frame bit handling */
	uint8_t sw_pf_proc_enable;
#endif
	/*
	 * target tx credit -
	 * not needed for LL, but used for HL download scheduler to keep
	 * track of roughly how much space is available in the target for
	 * tx frames
	 */
	qdf_atomic_t target_tx_credit;
	qdf_atomic_t orig_target_tx_credit;

	/* Peer mac address to staid mapping */
	struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];

	/* ol_txrx_vdev list */
	TAILQ_HEAD(, ol_txrx_vdev_t) vdev_list;

	/* peer ID to peer object map (array of pointers to peer objects) */
	struct ol_txrx_peer_t **peer_id_to_obj_map;

	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, ol_txrx_peer_t) *bins;
	} peer_hash;

	/* rx specific processing */
	struct {
		struct {
			TAILQ_HEAD(, ol_rx_reorder_t) waitlist;
			uint32_t timeout_ms;
		} defrag;
		struct {
			int defrag_timeout_check;
			int dup_check;
		} flags;

		struct {
			struct ol_tx_reorder_cat_timeout_t
				access_cats[TXRX_NUM_WMM_AC];
		} reorder_timeout;
		qdf_spinlock_t mutex;
	} rx;

	/* rx proc function */
	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned tid, qdf_nbuf_t msdu_list);

	/* tx data delivery notification callback function */
	struct {
		ol_txrx_data_tx_cb func;
		void *ctxt;
	} tx_data_callback;

	/* tx management delivery notification callback functions */
	struct {
		struct {
			ol_txrx_mgmt_tx_cb download_cb;
			ol_txrx_mgmt_tx_cb ota_ack_cb;
			void *ctxt;
		} callbacks[OL_TXRX_MGMT_NUM_TYPES];
	} tx_mgmt;

	struct {
		uint16_t pool_size;
		uint16_t num_free;
		union ol_tx_desc_list_elem_t *freelist;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
		uint8_t num_invalid_bin;
		qdf_spinlock_t flow_pool_list_lock;
		TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
#endif
		uint32_t page_size;
		uint16_t desc_reserved_size;
		uint8_t page_divider;
		uint32_t offset_filter;
		struct qdf_mem_multi_page_t desc_pages;
	} tx_desc;

	uint8_t is_mgmt_over_wmi_enabled;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2)
	struct ol_txrx_pool_stats pool_stats;
	uint32_t num_msdu_desc;
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
	struct ol_tx_flow_pool_t *mgmt_pool;
#endif
#endif

	struct {
		int (*cmp)(union htt_rx_pn_t *new,
			   union htt_rx_pn_t *old,
			   int is_unicast, int opmode);
		int len;
	} rx_pn[htt_num_sec_types];

	/* tx mutex */
	OL_TX_MUTEX_TYPE tx_mutex;

	/*
	 * peer ref mutex:
	 * 1. Protect peer object lookups until the returned peer object's
	 *    reference count is incremented.
	 * 2. Provide mutex when accessing peer object lookup structures.
	 */
	OL_RX_MUTEX_TYPE peer_ref_mutex;

	/*
	 * last_real_peer_mutex:
	 * Protect lookups of any vdev's last_real_peer pointer until the
	 * reference count for the pointed-to peer object is incremented.
	 * This mutex could be in the vdev struct, but it's slightly simpler
	 * to have a single lock in the pdev struct. Since the lock is only
	 * held for an extremely short time, and since it's very unlikely for
	 * two vdev's to concurrently access the lock, there's no real
	 * benefit to having a per-vdev lock.
	 */
	OL_RX_MUTEX_TYPE last_real_peer_mutex;

	struct {
		struct {
			struct {
				struct {
					uint64_t ppdus;
					uint64_t mpdus;
				} normal;
				struct {
					/*
					 * mpdu_bad is general -
					 * replace it with the specific counters
					 * below
					 */
					uint64_t mpdu_bad;
					/* uint64_t mpdu_fcs; */
					/* uint64_t mpdu_duplicate; */
					/* uint64_t mpdu_pn_replay; */
					/* uint64_t mpdu_bad_sender; */
					/* ^ comment: peer not found */
					/* uint64_t mpdu_flushed; */
					/* uint64_t msdu_defrag_mic_err; */
					uint64_t msdu_mc_dup_drop;
				} err;
			} rx;
		} priv;
		struct ol_txrx_stats pub;
	} stats;

#if defined(ENABLE_RX_REORDER_TRACE)
	struct {
		uint32_t mask;
		uint32_t idx;
		uint64_t cnt;
#define TXRX_RX_REORDER_TRACE_SIZE_LOG2 8       /* 256 entries */
		struct {
			uint16_t reorder_idx;
			uint16_t seq_num;
			uint8_t num_mpdus;
			uint8_t tid;
		} *data;
	} rx_reorder_trace;
#endif /* ENABLE_RX_REORDER_TRACE */

#if defined(ENABLE_RX_PN_TRACE)
	struct {
		uint32_t mask;
		uint32_t idx;
		uint64_t cnt;
#define TXRX_RX_PN_TRACE_SIZE_LOG2 5    /* 32 entries */
		struct {
			struct ol_txrx_peer_t *peer;
			uint32_t pn32;
			uint16_t seq_num;
			uint8_t unicast;
			uint8_t tid;
		} *data;
	} rx_pn_trace;
#endif /* ENABLE_RX_PN_TRACE */

#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
	bool host_80211_enable;
#endif

	/*
	 * tx_queue only applies for HL, but is defined unconditionally to avoid
	 * wrapping references to tx_queue in "defined(CONFIG_HL_SUPPORT)"
	 * conditional compilation.
	 */
	struct {
		qdf_atomic_t rsrc_cnt;
		/* threshold_lo - when to start tx desc margin replenishment */
		uint16_t rsrc_threshold_lo;
		/* threshold_hi - where to stop during tx desc margin
		   replenishment */
		uint16_t rsrc_threshold_hi;
	} tx_queue;

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
	qdf_spinlock_t peer_stat_mutex;
#endif

	int rssi_update_shift;
	int rssi_new_weight;
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
	struct {
		ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		ol_txrx_local_peer_id_t freelist;
		qdf_spinlock_t lock;
		ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;
#endif

#ifdef QCA_COMPUTE_TX_DELAY
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
#define QCA_TX_DELAY_NUM_CATEGORIES \
	(OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
#else
#define QCA_TX_DELAY_NUM_CATEGORIES 1
#endif
	struct {
		qdf_spinlock_t mutex;
		struct {
			struct ol_tx_delay_data copies[2]; /* ping-pong */
			int in_progress_idx;
			uint32_t avg_start_time_ticks;
		} cats[QCA_TX_DELAY_NUM_CATEGORIES];
		uint32_t tx_compl_timestamp_ticks;
		uint32_t avg_period_ticks;
		uint32_t hist_internal_bin_width_mult;
		uint32_t hist_internal_bin_width_shift;
	} tx_delay;

	uint16_t packet_count[QCA_TX_DELAY_NUM_CATEGORIES];
	uint16_t packet_loss_count[QCA_TX_DELAY_NUM_CATEGORIES];

#endif /* QCA_COMPUTE_TX_DELAY */

	struct {
		qdf_spinlock_t mutex;
		/* timer used to monitor the throttle "on" phase and
		   "off" phase */
		qdf_timer_t phase_timer;
		/* timer used to send tx frames */
		qdf_timer_t tx_timer;
		/* This is the time in ms of the throttling window, it will
		 * include an "on" phase and an "off" phase */
		uint32_t throttle_period_ms;
		/* Current throttle level set by the client ex. level 0,
		   level 1, etc */
		enum throttle_level current_throttle_level;
		/* Index that points to the phase within the throttle period */
		enum throttle_phase current_throttle_phase;
		/* Maximum number of frames to send to the target at one time */
		uint32_t tx_threshold;
		/* stores time in ms of on/off phase for each throttle level */
		int throttle_time_ms[THROTTLE_LEVEL_MAX][THROTTLE_PHASE_MAX];
		/* mark true if traffic is paused due to thermal throttling */
		bool is_paused;
	} tx_throttle;

#ifdef IPA_OFFLOAD
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *osif_dev;
#endif /* IPA_OFFLOAD */

#if defined(FEATURE_TSO)
	struct {
		uint16_t pool_size;
		uint16_t num_free;
		struct qdf_tso_seg_elem_t *freelist;
		/* tso mutex */
		OL_TX_MUTEX_TYPE tso_mutex;
	} tso_seg_pool;
#endif
	uint8_t ocb_peer_valid;
	struct ol_txrx_peer_t *ocb_peer;
	ol_tx_pause_callback_fp pause_cb;

	struct {
		void *lro_data;
		void (*lro_flush_cb)(void *);
	} lro_info;
};

struct ol_txrx_ocb_chan_info {
	uint32_t chan_freq;
	uint16_t disable_rx_stats_hdr:1;
};

struct ol_txrx_vdev_t {
	struct ol_txrx_pdev_t *pdev; /* pdev - the physical device that is
					the parent of this virtual device */
	uint8_t vdev_id;             /* ID used to specify a particular vdev
					to the target */
	void *osif_dev;
	union ol_txrx_align_mac_addr_t mac_addr; /* MAC address */
	/* tx paused - NO LONGER NEEDED? */
	TAILQ_ENTRY(ol_txrx_vdev_t) vdev_list_elem; /* node in the pdev's list
						       of vdevs */
	TAILQ_HEAD(peer_list_t, ol_txrx_peer_t) peer_list;
	struct ol_txrx_peer_t *last_real_peer; /* last real peer created for
						  this vdev (not "self"
						  pseudo-peer) */
	ol_txrx_tx_fp tx; /* transmit function used by this vdev */

	struct {
		/*
		 * If the vdev object couldn't be deleted immediately because
		 * it still had some peer objects left, remember that a delete
		 * was requested, so it can be deleted once all its peers have
		 * been deleted.
		 */
		int pending;
		/*
		 * Store a function pointer and a context argument to provide a
		 * notification for when the vdev is deleted.
		 */
		ol_txrx_vdev_delete_cb callback;
		void *context;
	} delete;

	/* safe mode control to bypass the encrypt and decipher process */
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
	struct privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t num_filters;

	enum wlan_op_mode opmode;

#ifdef QCA_IBSS_SUPPORT
	/* ibss mode related */
	int16_t ibss_peer_num;  /* the number of active peers */
	int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
#endif

	struct {
		struct {
			qdf_nbuf_t head;
			qdf_nbuf_t tail;
			int depth;
		} txq;
		uint32_t paused_reason;
		qdf_spinlock_t mutex;
		qdf_timer_t timer;
		int max_q_depth;
		bool is_q_paused;
		bool is_q_timer_on;
		uint32_t q_pause_cnt;
		uint32_t q_unpause_cnt;
		uint32_t q_overflow_cnt;
	} ll_pause;
	bool disable_intrabss_fwd;
	qdf_atomic_t os_q_paused;
	uint16_t tx_fl_lwm;
	uint16_t tx_fl_hwm;
	qdf_spinlock_t flow_control_lock;
	ol_txrx_tx_flow_control_fp osif_flow_control_cb;
	void *osif_fc_ctx;
	uint16_t wait_on_peer_id;
	qdf_event_t wait_delete_comp;
#if defined(FEATURE_TSO)
	struct {
		int pool_elems; /* total number of elements in the pool */
		int alloc_cnt; /* number of allocated elements */
		uint32_t *freelist; /* free list of qdf_tso_seg_elem_t */
	} tso_pool_t;
#endif

	/* last channel change event received */
	struct {
		bool is_valid;  /* whether the rest of the members are valid */
		uint16_t mhz;
		uint16_t band_center_freq1;
		uint16_t band_center_freq2;
		WLAN_PHY_MODE phy_mode;
	} ocb_channel_event;

	/* Information about the channels in the OCB schedule */
	struct ol_txrx_ocb_chan_info *ocb_channel_info;
	uint32_t ocb_channel_count;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct ol_tx_flow_pool_t *pool;
#endif
};

struct ol_rx_reorder_array_elem_t {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};

struct ol_rx_reorder_t {
	uint8_t win_sz;
	uint8_t win_sz_mask;
	uint8_t num_mpdus;
	struct ol_rx_reorder_array_elem_t *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct ol_rx_reorder_array_elem_t base;
#if defined(QCA_SUPPORT_OL_RX_REORDER_TIMEOUT)
	struct ol_rx_reorder_timeout_list_elem_t timeout;
#endif
	/* only used for defrag right now */
	TAILQ_ENTRY(ol_rx_reorder_t) defrag_waitlist_elem;
	uint32_t defrag_timeout_ms;
	/* get back to parent ol_txrx_peer_t when ol_rx_reorder_t is in a
	 * waitlist */
	uint16_t tid;
};

enum {
	txrx_sec_mcast = 0,
	txrx_sec_ucast
};
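
/*
 * Note: txrx_sec_mcast/txrx_sec_ucast serve as indices into the two-entry
 * security state kept per peer (the security[2] member of
 * struct ol_txrx_peer_t below), e.g.
 *	sec_type = peer->security[txrx_sec_ucast].sec_type;
 */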

typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
				      tx_msdu_info);

struct ol_txrx_peer_t {
	struct ol_txrx_vdev_t *vdev;

	qdf_atomic_t ref_cnt;
	qdf_atomic_t delete_in_progress;
	qdf_atomic_t flush_in_progress;

	/* The peer state tracking is used for HL systems
	 * that don't support tx and rx filtering within the target.
	 * In such systems, the peer's state determines what kind of
	 * tx and rx filtering, if any, is done.
	 * This variable doesn't apply to LL systems, or to HL systems for
	 * which the target handles tx and rx filtering. However, it is
	 * simplest to declare and update this variable unconditionally,
	 * for all systems.
	 */
	enum ol_txrx_peer_state state;
	qdf_spinlock_t peer_info_lock;
	ol_rx_callback_fp osif_rx;
	qdf_spinlock_t bufq_lock;
	struct list_head cached_bufq;

	ol_tx_filter_func tx_filter;

	/* peer ID(s) for this peer */
	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
	uint16_t local_id;
#endif

	union ol_txrx_align_mac_addr_t mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) hash_list_elem;

	/*
	 * per TID info -
	 * stored in separate arrays to avoid alignment padding mem overhead
	 */
	struct ol_rx_reorder_t tids_rx_reorder[OL_TXRX_NUM_EXT_TIDS];
	union htt_rx_pn_t tids_last_pn[OL_TXRX_NUM_EXT_TIDS];
	uint8_t tids_last_pn_valid[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_next_rel_idx[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_last_seq[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_mcast_last_seq[OL_TXRX_NUM_EXT_TIDS];

	struct {
		enum htt_sec_type sec_type;
		uint32_t michael_key[2];        /* relevant for TKIP */
	} security[2];  /* 0 -> multicast, 1 -> unicast */

	/*
	 * rx proc function: this either is a copy of pdev's rx_opt_proc for
	 * regular rx processing, or has been redirected to a /dev/null discard
	 * function when peer deletion is in progress.
	 */
	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned tid, qdf_nbuf_t msdu_list);

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
	ol_txrx_peer_stats_t stats;
#endif
	int16_t rssi_dbm;

	/* NAWDS Flag and Bss Peer bit */
	uint16_t nawds_enabled:1, bss_peer:1, valid:1;

	/* QoS info */
	uint8_t qos_capable;
	/* U-APSD tid mask */
	uint8_t uapsd_mask;
	/* flag indicating key installed */
	uint8_t keyinstalled;

	/* Bit to indicate if PN check is done in fw */
	qdf_atomic_t fw_pn_check;

#ifdef WLAN_FEATURE_11W
	/* PN counter for Robust Management Frames */
	uint64_t last_rmf_pn;
	uint32_t rmf_pn_replays;
	uint8_t last_rmf_pn_valid;
#endif

	/* Properties of the last received PPDU */
	int16_t last_pkt_rssi_cmb;
	int16_t last_pkt_rssi[4];
	uint8_t last_pkt_legacy_rate;
	uint8_t last_pkt_legacy_rate_sel;
	uint32_t last_pkt_timestamp_microsec;
	uint8_t last_pkt_timestamp_submicrosec;
	uint32_t last_pkt_tsf;
	uint8_t last_pkt_tid;
	uint16_t last_pkt_center_freq;
};

enum ol_rx_err_type {
	OL_RX_ERR_DEFRAG_MIC,
	OL_RX_ERR_PN,
	OL_RX_ERR_UNKNOWN_PEER,
	OL_RX_ERR_MALFORMED,
	OL_RX_ERR_TKIP_MIC,
	OL_RX_ERR_DECRYPT,
	OL_RX_ERR_MPDU_LENGTH,
	OL_RX_ERR_ENCRYPT_REQUIRED,
	OL_RX_ERR_DUP,
	OL_RX_ERR_UNKNOWN,
	OL_RX_ERR_FCS,
	OL_RX_ERR_PRIVACY,
	OL_RX_ERR_NONE_FRAG,
	OL_RX_ERR_NONE = 0xFF
};

/**
 * ol_mic_error_info - carries the information associated with
 * a MIC error
 * @vdev_id: virtual device ID
 * @key_id: Key ID
 * @pn: packet number
 * @sa: source address
 * @da: destination address
 * @ta: transmitter address
 */
struct ol_mic_error_info {
	uint8_t vdev_id;
	uint32_t key_id;
	uint64_t pn;
	uint8_t sa[OL_TXRX_MAC_ADDR_LEN];
	uint8_t da[OL_TXRX_MAC_ADDR_LEN];
	uint8_t ta[OL_TXRX_MAC_ADDR_LEN];
};

/**
 * ol_error_info - carries the information associated with an
 * error indicated by the firmware
 * @mic_err: MIC error information
 */
struct ol_error_info {
	union {
		struct ol_mic_error_info mic_err;
	} u;
};
#endif /* _OL_TXRX_TYPES__H_ */