/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file ol_txrx_types.h
 * @brief Define the major data types used internally by the host datapath SW.
 */
#ifndef _OL_TXRX_TYPES__H_
#define _OL_TXRX_TYPES__H_

#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_mem.h>
#include <cds_queue.h>          /* TAILQ */
#include <a_types.h>            /* A_UINT8 */
#include <htt.h>                /* htt_sec_type, htt_pkt_type, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_t */
#include <wdi_event_api.h>      /* wdi_event_subscribe */
#include <qdf_timer.h>          /* qdf_timer_t */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <pktlog.h>             /* ol_pktlog_dev_handle */
#include <ol_txrx_stats.h>
#include <txrx.h>
#include "ol_txrx_htt_api.h"
#include "ol_htt_tx_api.h"
#include "ol_htt_rx_api.h"
#include "ol_txrx_ctrl_api.h"   /* WLAN_MAX_STA_COUNT */
#include "ol_txrx_osif_api.h"   /* ol_rx_callback_fp */
#include "cdp_txrx_flow_ctrl_v2.h"
#include "cdp_txrx_peer_ops.h"

/*
 * The target may allocate multiple IDs for a peer.
 * In particular, the target may allocate one ID to represent the
 * multicast key the peer uses, and another ID to represent the
 * unicast key the peer uses.
 */
#define MAX_NUM_PEER_ID_PER_PEER 8

#define OL_TXRX_INVALID_NUM_PEERS (-1)

#define OL_TXRX_MAC_ADDR_LEN 6

/* OL_TXRX_NUM_EXT_TIDS -
 * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
 */
#define OL_TXRX_NUM_EXT_TIDS 19

#define OL_TX_NUM_QOS_TIDS 16   /* 16 regular TIDs */
#define OL_TX_NON_QOS_TID 16
#define OL_TX_MGMT_TID 17
#define OL_TX_NUM_TIDS 18
#define OL_RX_MCAST_TID 18      /* Mcast TID only between f/w & host */

#define OL_TX_VDEV_MCAST_BCAST 0        /* HTT_TX_EXT_TID_MCAST_BCAST */
#define OL_TX_VDEV_DEFAULT_MGMT 1       /* HTT_TX_EXT_TID_DEFALT_MGMT */
#define OL_TX_VDEV_NUM_QUEUES 2

#define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
#define OL_TXRX_MGMT_NUM_TYPES 8

#define OL_TX_MUTEX_TYPE qdf_spinlock_t
#define OL_RX_MUTEX_TYPE qdf_spinlock_t

/* TXRX Histogram defines */
#define TXRX_DATA_HISTROGRAM_GRANULARITY 1000
#define TXRX_DATA_HISTROGRAM_NUM_INTERVALS 100

struct ol_txrx_pdev_t;
struct ol_txrx_vdev_t;
struct ol_txrx_peer_t;
95
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080096/* rx filter related */
97#define MAX_PRIVACY_FILTERS 4 /* max privacy filters */
98
99enum privacy_filter {
100 PRIVACY_FILTER_ALWAYS,
101 PRIVACY_FILTER_KEY_UNAVAILABLE,
102};
103
104enum privacy_filter_packet_type {
105 PRIVACY_FILTER_PACKET_UNICAST,
106 PRIVACY_FILTER_PACKET_MULTICAST,
107 PRIVACY_FILTER_PACKET_BOTH
108};
109
110struct privacy_exemption {
111 /* ethertype -
112 * type of ethernet frames this filter applies to, in host byte order
113 */
114 uint16_t ether_type;
115 enum privacy_filter filter_type;
116 enum privacy_filter_packet_type packet_type;
117};
118
119enum ol_tx_frm_type {
Prakash Manjunathappa6dc1a962016-05-05 19:32:53 -0700120 OL_TX_FRM_STD = 0, /* regular frame - no added header fragments */
121 OL_TX_FRM_TSO, /* TSO segment, with a modified IP header added */
122 OL_TX_FRM_AUDIO, /* audio frames, with a custom LLC/SNAP hdr added */
123 OL_TX_FRM_NO_FREE, /* frame requires special tx completion callback */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800124};

struct ol_tx_desc_t {
	qdf_nbuf_t netbuf;
	void *htt_tx_desc;
	uint16_t id;
	qdf_dma_addr_t htt_tx_desc_paddr;
	void *htt_frag_desc; /* struct msdu_ext_desc_t * */
	qdf_dma_addr_t htt_frag_desc_paddr;
	qdf_atomic_t ref_cnt;
	enum htt_tx_status status;

#ifdef QCA_COMPUTE_TX_DELAY
	uint32_t entry_timestamp_ticks;
#endif
	/*
	 * Allow tx descriptors to be stored in (doubly-linked) lists.
	 * This is mainly used for HL tx queuing and scheduling, but is
	 * also used by LL+HL for batch processing of tx frames.
	 */
	TAILQ_ENTRY(ol_tx_desc_t) tx_desc_list_elem;

	/*
	 * Remember whether the tx frame is a regular packet, or whether
	 * the driver added extra header fragments (e.g. a modified IP header
	 * for TSO fragments, or an added LLC/SNAP header for audio interworking
	 * data) that need to be handled in a special manner.
	 * This field is filled in with the ol_tx_frm_type enum.
	 */
	uint8_t pkt_type;
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* used by tx encap, to restore the os buf start offset
	 * after tx complete
	 */
	uint8_t orig_l2_hdr_bytes;
#endif
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct ol_tx_flow_pool_t *pool;
#endif
	void *tso_desc;
};

typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;

union ol_tx_desc_list_elem_t {
	union ol_tx_desc_list_elem_t *next;
	struct ol_tx_desc_t tx_desc;
};
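
/*
 * Illustrative sketch (not a verbatim copy of the driver code): because
 * 'next' overlays the descriptor storage itself, a free tx descriptor
 * costs no extra memory for freelist linkage.  Popping a descriptor off
 * such a freelist looks roughly like:
 *
 *   union ol_tx_desc_list_elem_t *elem = freelist;
 *   if (elem) {
 *       freelist = elem->next;
 *       <use &elem->tx_desc>
 *   }
 */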

union ol_txrx_align_mac_addr_t {
	uint8_t raw[OL_TXRX_MAC_ADDR_LEN];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
};
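
/*
 * Illustrative sketch (a hypothetical helper, not part of this API):
 * the align2/align4 views allow a MAC address comparison with two or
 * three integer compares instead of a byte-wise memcmp, e.g.:
 *
 *   static inline int
 *   mac_addr_eq(union ol_txrx_align_mac_addr_t *a,
 *               union ol_txrx_align_mac_addr_t *b)
 *   {
 *       return a->align4.bytes_abcd == b->align4.bytes_abcd &&
 *              a->align4.bytes_ef == b->align4.bytes_ef;
 *   }
 *
 * valid only when both operands really are 32-bit aligned.
 */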

struct ol_rx_reorder_timeout_list_elem_t {
	TAILQ_ENTRY(ol_rx_reorder_timeout_list_elem_t)
		reorder_timeout_list_elem;
	uint32_t timestamp_ms;
	struct ol_txrx_peer_t *peer;
	uint8_t tid;
	uint8_t active;
};

#define TXRX_TID_TO_WMM_AC(_tid) ( \
		(((_tid) >> 1) == 3) ? TXRX_WMM_AC_VO : \
		(((_tid) >> 1) == 2) ? TXRX_WMM_AC_VI : \
		(((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
		TXRX_WMM_AC_BE)
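
/*
 * The macro above computes the standard 802.11 UP-to-AC mapping without
 * a lookup table.  Expanded per QoS TID it yields:
 *
 *   TID 0, 3 -> TXRX_WMM_AC_BE (best effort)
 *   TID 1, 2 -> TXRX_WMM_AC_BK (background)
 *   TID 4, 5 -> TXRX_WMM_AC_VI (video)
 *   TID 6, 7 -> TXRX_WMM_AC_VO (voice)
 *
 * For example, TXRX_TID_TO_WMM_AC(2): (2 >> 1) == 1, so the first two
 * tests fail, and ((2 ^ 1) & 0x1) == 1 selects TXRX_WMM_AC_BK.
 */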

struct ol_tx_reorder_cat_timeout_t {
	TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
	qdf_timer_t timer;
	uint32_t duration_ms;
	struct ol_txrx_pdev_t *pdev;
};

enum ol_tx_queue_status {
	ol_tx_queue_empty = 0,
	ol_tx_queue_active,
	ol_tx_queue_paused,
};

struct ol_txrx_msdu_info_t {
	struct htt_msdu_info_t htt;
	struct ol_txrx_peer_t *peer;
	struct qdf_tso_info_t tso_info;
};

enum {
	ol_tx_aggr_untried = 0,
	ol_tx_aggr_enabled,
	ol_tx_aggr_disabled,
	ol_tx_aggr_retry,
	ol_tx_aggr_in_progress,
};

struct ol_tx_frms_queue_t {
	/* list_elem -
	 * Allow individual tx frame queues to be linked together into
	 * scheduler queues of tx frame queues
	 */
	TAILQ_ENTRY(ol_tx_frms_queue_t) list_elem;
	uint8_t aggr_state;
	struct {
		uint8_t total;
		/* pause requested by ctrl SW rather than txrx SW */
		uint8_t by_ctrl;
	} paused_count;
	uint8_t ext_tid;
	uint16_t frms;
	uint32_t bytes;
	ol_tx_desc_list head;
	enum ol_tx_queue_status flag;
};

enum {
	ol_tx_log_entry_type_invalid,
	ol_tx_log_entry_type_queue_state,
	ol_tx_log_entry_type_enqueue,
	ol_tx_log_entry_type_dequeue,
	ol_tx_log_entry_type_drop,
	ol_tx_log_entry_type_queue_free,

	ol_tx_log_entry_type_wrap,
};

struct ol_tx_log_queue_state_var_sz_t {
	uint32_t active_bitmap;
	uint16_t credit;
	uint8_t num_cats_active;
	uint8_t data[1];
};

struct ol_tx_log_queue_add_t {
	uint8_t num_frms;
	uint8_t tid;
	uint16_t peer_id;
	uint16_t num_bytes;
};

struct ol_mac_addr {
	uint8_t mac_addr[OL_TXRX_MAC_ADDR_LEN];
};

#ifndef OL_TXRX_NUM_LOCAL_PEER_IDS
#define OL_TXRX_NUM_LOCAL_PEER_IDS 33   /* default */
#endif

#ifndef ol_txrx_local_peer_id_t
#define ol_txrx_local_peer_id_t uint8_t /* default */
#endif

#ifdef QCA_COMPUTE_TX_DELAY
/*
 * Delay histogram bins: 16 bins of 10 ms each to count delays
 * from 0-160 ms, plus one overflow bin for delays > 160 ms.
 */
#define QCA_TX_DELAY_HIST_INTERNAL_BINS 17
#define QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS 10

struct ol_tx_delay_data {
	struct {
		uint64_t transmit_sum_ticks;
		uint64_t queue_sum_ticks;
		uint32_t transmit_num;
		uint32_t queue_num;
	} avgs;
	uint16_t hist_bins_queue[QCA_TX_DELAY_HIST_INTERNAL_BINS];
};
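
/*
 * A minimal sketch (an assumption about usage, not the driver's exact
 * code) of how a measured queuing delay maps into hist_bins_queue:
 *
 *   uint32_t bin = delay_ms / QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS;
 *   if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
 *       bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;   (overflow bin)
 *   data->hist_bins_queue[bin]++;
 */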

#endif /* QCA_COMPUTE_TX_DELAY */

/* Thermal Mitigation */

enum throttle_level {
	THROTTLE_LEVEL_0,
	THROTTLE_LEVEL_1,
	THROTTLE_LEVEL_2,
	THROTTLE_LEVEL_3,
	/* Invalid */
	THROTTLE_LEVEL_MAX,
};

enum throttle_phase {
	THROTTLE_PHASE_OFF,
	THROTTLE_PHASE_ON,
	/* Invalid */
	THROTTLE_PHASE_MAX,
};

#define THROTTLE_TX_THRESHOLD (100)

typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED: pool is active (can take/put descriptors)
 *				and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *				and network queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_INVALID = 2,
	FLOW_POOL_INACTIVE = 3,
};

/**
 * struct ol_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 * @pkt_drop_no_desc: packets dropped due to unavailability of descriptors
 */
struct ol_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pkt_drop_no_pool;
	uint16_t pkt_drop_no_desc;
};

/**
 * struct ol_tx_flow_pool_t - flow_pool info
 * @flow_pool_list_elem: flow_pool_list element
 * @flow_pool_lock: flow_pool lock
 * @flow_pool_id: flow_pool id
 * @flow_pool_size: flow_pool size
 * @avail_desc: available descriptors
 * @deficient_desc: deficient descriptors
 * @status: flow pool status
 * @flow_type: flow pool type
 * @member_flow_id: member flow id
 * @stop_th: stop threshold
 * @start_th: start threshold
 * @freelist: tx descriptor freelist
 */
struct ol_tx_flow_pool_t {
	TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
	qdf_spinlock_t flow_pool_lock;
	uint8_t flow_pool_id;
	uint16_t flow_pool_size;
	uint16_t avail_desc;
	uint16_t deficient_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
	uint8_t member_flow_id;
	uint16_t stop_th;
	uint16_t start_th;
	union ol_tx_desc_list_elem_t *freelist;
};
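
/*
 * Hedged sketch of the threshold behaviour implied by the fields above
 * (inferred from the field documentation, not a verbatim copy of the
 * driver logic): descriptor consumption pauses the pool's network
 * queues once avail_desc drops below stop_th, and descriptor
 * completion unpauses them once avail_desc climbs back above start_th:
 *
 *   if (pool->avail_desc < pool->stop_th &&
 *       pool->status == FLOW_POOL_ACTIVE_UNPAUSED) {
 *       pool->status = FLOW_POOL_ACTIVE_PAUSED;
 *       <pause the netif queue(s) feeding this pool>
 *   } else if (pool->avail_desc > pool->start_th &&
 *              pool->status == FLOW_POOL_ACTIVE_PAUSED) {
 *       pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
 *       <unpause the netif queue(s)>
 *   }
 */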

#endif

/*
 * As depicted in the diagram below, the pdev contains an array of
 * NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
 * Each element identifies all the tx queues that are active for
 * the TID, from the different peers.
 *
 * Each peer contains an array of NUM_EXT_TID ol_tx_frms_queue_t elements.
 * Each element identifies the tx frames for the TID that need to be sent
 * to the peer.
 *
 *
 *  pdev: ol_tx_active_queues_in_tid_t active_in_tids[NUM_EXT_TIDS]
 *                                TID
 *       0            1            2                     17
 *  +============+============+============+==    ==+============+
 *  | active (y) | active (n) | active (n) |        | active (y) |
 *  |------------+------------+------------+--    --+------------|
 *  | queues     | queues     | queues     |        | queues     |
 *  +============+============+============+==    ==+============+
 *       |                                               |
 *    .--+-----------------------------------------------'
 *    |  |
 *    |  |     peer X:                            peer Y:
 *    |  |     ol_tx_frms_queue_t                 ol_tx_frms_queue_t
 *    |  |     tx_queues[NUM_EXT_TIDS]            tx_queues[NUM_EXT_TIDS]
 *    |  | TID +======+                       TID +======+
 *    |  `---->| next |-------------------------->| next |--X
 *    |     0  | prev |   .------.   .------.  0  | prev |   .------.
 *    |        | txq  |-->|txdesc|-->|txdesc|     | txq  |-->|txdesc|
 *    |        +======+   `------'   `------'     +======+   `------'
 *    |        | next |      |          |      1  | next |      |
 *    |     1  | prev |      v          v         | prev |      v
 *    |        | txq  |   .------.   .------.     | txq  |   .------.
 *    |        +======+   |netbuf|   |netbuf|     +======+   |netbuf|
 *    |        | next |   `------'   `------'     | next |   `------'
 *    |     2  | prev |                        2  | prev |
 *    |        | txq  |                           | txq  |
 *    |        +======+                           +======+
 *    |        |  |  |                            |  |  |
 *    |
 *    |
 *    |        |  |  |                            |  |  |
 *    |        +======+                           +======+
 *    `------->| next |--X                        | next |
 *          17 | prev |   .------.             17 | prev |
 *             | txq  |-->|txdesc|                | txq  |
 *             +======+   `------'                +======+
 *                           |
 *                           v
 *                        .------.
 *                        |netbuf|
 *                        `------'
 */
struct ol_txrx_pdev_t {
	/* ctrl_pdev - handle for querying config info */
	ol_pdev_handle ctrl_pdev;

	/* osdev - handle for mem alloc / free, map / unmap */
	qdf_device_t osdev;

	htt_pdev_handle htt_pdev;

#ifdef WLAN_FEATURE_FASTPATH
	struct CE_handle *ce_tx_hdl;      /* Handle to Tx packet posting CE */
	struct CE_handle *ce_htt_msg_hdl; /* Handle to TxRx completion CE */
#endif /* WLAN_FEATURE_FASTPATH */

	struct {
		int is_high_latency;
		int host_addba;
		int ll_pause_txq_limit;
		int default_tx_comp_req;
	} cfg;

	/* WDI subscriber's event list */
	wdi_event_subscribe **wdi_event_list;

#if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
	bool pkt_log_init;
	/* Pktlog pdev */
	struct ol_pktlog_dev_t *pl_dev;
#endif /* #ifndef REMOVE_PKT_LOG */

	enum ol_sec_type sec_types[htt_num_sec_types];
	/* standard frame type */
	enum wlan_frm_fmt frame_format;
	enum htt_pkt_type htt_pkt_type;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	/* txrx encap/decap */
	uint8_t sw_tx_encap;
	uint8_t sw_rx_decap;
	uint8_t target_tx_tran_caps;
	uint8_t target_rx_tran_caps;
	/* llc process */
	uint8_t sw_tx_llc_proc_enable;
	uint8_t sw_rx_llc_proc_enable;
	/* A-MSDU */
	uint8_t sw_subfrm_hdr_recovery_enable;
	/* Protected Frame bit handling */
	uint8_t sw_pf_proc_enable;
#endif
	/*
	 * target tx credit -
	 * not needed for LL, but used for HL download scheduler to keep
	 * track of roughly how much space is available in the target for
	 * tx frames
	 */
	qdf_atomic_t target_tx_credit;
	qdf_atomic_t orig_target_tx_credit;

	/* Peer mac address to staid mapping */
	struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];

	/* ol_txrx_vdev list */
	TAILQ_HEAD(, ol_txrx_vdev_t) vdev_list;

	/* peer ID to peer object map (array of pointers to peer objects) */
	struct ol_txrx_peer_t **peer_id_to_obj_map;

	struct {
		unsigned mask;
		unsigned idx_bits;
		TAILQ_HEAD(, ol_txrx_peer_t) *bins;
	} peer_hash;
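
	/*
	 * Illustrative sketch (an assumption based on the mask/idx_bits
	 * fields above, not the driver's exact lookup function): a peer
	 * lookup would index a hash bin roughly as:
	 *
	 *   unsigned index = mac_hash(mac_addr) & pdev->peer_hash.mask;
	 *   TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index],
	 *                 hash_list_elem) {
	 *       if (<peer's mac_addr matches mac_addr>)
	 *           return peer;
	 *   }
	 *
	 * where mac_hash() stands in for whatever MAC-address hash the
	 * driver uses.
	 */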

	/* rx specific processing */
	struct {
		struct {
			TAILQ_HEAD(, ol_rx_reorder_t) waitlist;
			uint32_t timeout_ms;
		} defrag;
		struct {
			int defrag_timeout_check;
			int dup_check;
		} flags;

		struct {
			struct ol_tx_reorder_cat_timeout_t
				access_cats[TXRX_NUM_WMM_AC];
		} reorder_timeout;
		qdf_spinlock_t mutex;
	} rx;

	/* rx proc function */
	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned tid, qdf_nbuf_t msdu_list);

	/* tx data delivery notification callback function */
	struct {
		ol_txrx_data_tx_cb func;
		void *ctxt;
	} tx_data_callback;

	/* tx management delivery notification callback functions */
	struct {
		struct {
			ol_txrx_mgmt_tx_cb download_cb;
			ol_txrx_mgmt_tx_cb ota_ack_cb;
			void *ctxt;
		} callbacks[OL_TXRX_MGMT_NUM_TYPES];
	} tx_mgmt;

	struct {
		uint16_t pool_size;
		uint16_t num_free;
		union ol_tx_desc_list_elem_t *freelist;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
		uint8_t num_invalid_bin;
		qdf_spinlock_t flow_pool_list_lock;
		TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
#endif
		uint32_t page_size;
		uint16_t desc_reserved_size;
		uint8_t page_divider;
		uint32_t offset_filter;
		struct qdf_mem_multi_page_t desc_pages;
#ifdef DESC_DUP_DETECT_DEBUG
		uint32_t *free_list_bitmap;
#endif
	} tx_desc;

	uint8_t is_mgmt_over_wmi_enabled;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2)
	struct ol_txrx_pool_stats pool_stats;
	uint32_t num_msdu_desc;
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
	struct ol_tx_flow_pool_t *mgmt_pool;
#endif
#endif

	struct {
		int (*cmp)(union htt_rx_pn_t *new,
			   union htt_rx_pn_t *old,
			   int is_unicast, int opmode);
		int len;
	} rx_pn[htt_num_sec_types];

	/* tx mutex */
	OL_TX_MUTEX_TYPE tx_mutex;

	/*
	 * peer ref mutex:
	 * 1. Protect peer object lookups until the returned peer object's
	 *    reference count is incremented.
	 * 2. Provide mutex when accessing peer object lookup structures.
	 */
	OL_RX_MUTEX_TYPE peer_ref_mutex;

	/*
	 * last_real_peer_mutex:
	 * Protect lookups of any vdev's last_real_peer pointer until the
	 * reference count for the pointed-to peer object is incremented.
	 * This mutex could be in the vdev struct, but it's slightly simpler
	 * to have a single lock in the pdev struct.  Since the lock is only
	 * held for an extremely short time, and since it's very unlikely for
	 * two vdevs to concurrently access the lock, there's no real
	 * benefit to having a per-vdev lock.
	 */
	OL_RX_MUTEX_TYPE last_real_peer_mutex;

	struct {
		struct {
			struct {
				struct {
					uint64_t ppdus;
					uint64_t mpdus;
				} normal;
				struct {
					/*
					 * mpdu_bad is general -
					 * replace it with the specific
					 * counters below
					 */
					uint64_t mpdu_bad;
					/* uint64_t mpdu_fcs; */
					/* uint64_t mpdu_duplicate; */
					/* uint64_t mpdu_pn_replay; */
					/* uint64_t mpdu_bad_sender; */
					/* ^ comment: peer not found */
					/* uint64_t mpdu_flushed; */
					/* uint64_t msdu_defrag_mic_err; */
					uint64_t msdu_mc_dup_drop;
				} err;
			} rx;
		} priv;
		struct ol_txrx_stats pub;
	} stats;

#if defined(ENABLE_RX_REORDER_TRACE)
	struct {
		uint32_t mask;
		uint32_t idx;
		uint64_t cnt;
#define TXRX_RX_REORDER_TRACE_SIZE_LOG2 8       /* 256 entries */
		struct {
			uint16_t reorder_idx;
			uint16_t seq_num;
			uint8_t num_mpdus;
			uint8_t tid;
		} *data;
	} rx_reorder_trace;
#endif /* ENABLE_RX_REORDER_TRACE */

#if defined(ENABLE_RX_PN_TRACE)
	struct {
		uint32_t mask;
		uint32_t idx;
		uint64_t cnt;
#define TXRX_RX_PN_TRACE_SIZE_LOG2 5    /* 32 entries */
		struct {
			struct ol_txrx_peer_t *peer;
			uint32_t pn32;
			uint16_t seq_num;
			uint8_t unicast;
			uint8_t tid;
		} *data;
	} rx_pn_trace;
#endif /* ENABLE_RX_PN_TRACE */

#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
	bool host_80211_enable;
#endif

	/*
	 * tx_queue only applies for HL, but is defined unconditionally to avoid
	 * wrapping references to tx_queue in "defined(CONFIG_HL_SUPPORT)"
	 * conditional compilation.
	 */
	struct {
		qdf_atomic_t rsrc_cnt;
		/* threshold_lo - when to start tx desc margin replenishment */
		uint16_t rsrc_threshold_lo;
		/* threshold_hi - where to stop during tx desc margin
		 * replenishment
		 */
		uint16_t rsrc_threshold_hi;
	} tx_queue;

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
	qdf_spinlock_t peer_stat_mutex;
#endif

	int rssi_update_shift;
	int rssi_new_weight;
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
	struct {
		ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		ol_txrx_local_peer_id_t freelist;
		qdf_spinlock_t lock;
		ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;
#endif
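
	/*
	 * A minimal sketch (assuming an index-chained freelist, which the
	 * pool/freelist pair above suggests; not a verbatim copy of the
	 * driver code) of how a local peer ID could be allocated:
	 *
	 *   qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	 *   i = pdev->local_peer_ids.freelist;
	 *   if (pdev->local_peer_ids.pool[i] == i) {
	 *       <freelist empty - only the list-end marker remains>
	 *   } else {
	 *       peer->local_id = i;
	 *       pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
	 *       pdev->local_peer_ids.map[i] = peer;
	 *   }
	 *   qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	 */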

#ifdef QCA_COMPUTE_TX_DELAY
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
#define QCA_TX_DELAY_NUM_CATEGORIES \
	(OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
#else
#define QCA_TX_DELAY_NUM_CATEGORIES 1
#endif
	struct {
		qdf_spinlock_t mutex;
		struct {
			struct ol_tx_delay_data copies[2]; /* ping-pong */
			int in_progress_idx;
			uint32_t avg_start_time_ticks;
		} cats[QCA_TX_DELAY_NUM_CATEGORIES];
		uint32_t tx_compl_timestamp_ticks;
		uint32_t avg_period_ticks;
		uint32_t hist_internal_bin_width_mult;
		uint32_t hist_internal_bin_width_shift;
	} tx_delay;
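
	/*
	 * Note on hist_internal_bin_width_mult/shift above: a sketch of
	 * the usual multiply-then-shift idiom (an assumption, not the
	 * driver's exact code) that avoids a division on the fast path:
	 *
	 *   bin = (delay * hist_internal_bin_width_mult) >>
	 *         hist_internal_bin_width_shift;
	 *
	 * which approximates delay / bin_width using a precomputed
	 * fixed-point reciprocal of the bin width.
	 */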

	uint16_t packet_count[QCA_TX_DELAY_NUM_CATEGORIES];
	uint16_t packet_loss_count[QCA_TX_DELAY_NUM_CATEGORIES];

#endif /* QCA_COMPUTE_TX_DELAY */

	struct {
		qdf_spinlock_t mutex;
		/* timer used to monitor the throttle "on" phase and
		 * "off" phase
		 */
		qdf_timer_t phase_timer;
		/* timer used to send tx frames */
		qdf_timer_t tx_timer;
		/* This is the time in ms of the throttling window, it will
		 * include an "on" phase and an "off" phase
		 */
		uint32_t throttle_period_ms;
		/* Current throttle level set by the client, e.g. level 0,
		 * level 1, etc.
		 */
		enum throttle_level current_throttle_level;
		/* Index that points to the phase within the throttle period */
		enum throttle_phase current_throttle_phase;
		/* Maximum number of frames to send to the target at one time */
		uint32_t tx_threshold;
		/* stores time in ms of on/off phase for each throttle level */
		int throttle_time_ms[THROTTLE_LEVEL_MAX][THROTTLE_PHASE_MAX];
		/* mark true if traffic is paused due to thermal throttling */
		bool is_paused;
	} tx_throttle;
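
	/*
	 * Worked example of the duty-cycle model above (illustrative
	 * numbers, not values taken from the driver): with
	 * throttle_period_ms = 4000 and a throttle level whose "off"
	 * share is 25%, throttle_time_ms[level][THROTTLE_PHASE_ON] would
	 * be 3000 and throttle_time_ms[level][THROTTLE_PHASE_OFF] 1000.
	 * phase_timer toggles current_throttle_phase between the two
	 * phases, and tx is held off while the phase is
	 * THROTTLE_PHASE_OFF.
	 */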

#ifdef IPA_OFFLOAD
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *osif_dev;
#endif /* IPA_OFFLOAD */

#if defined(FEATURE_TSO)
	struct {
		uint16_t pool_size;
		uint16_t num_free;
		struct qdf_tso_seg_elem_t *freelist;
		/* tso mutex */
		OL_TX_MUTEX_TYPE tso_mutex;
	} tso_seg_pool;
#endif
	uint8_t ocb_peer_valid;
	struct ol_txrx_peer_t *ocb_peer;
	ol_tx_pause_callback_fp pause_cb;

	struct {
		void *lro_data;
		void (*lro_flush_cb)(void *);
	} lro_info;
	struct ol_txrx_peer_t *self_peer;
};

struct ol_txrx_ocb_chan_info {
	uint32_t chan_freq;
	uint16_t disable_rx_stats_hdr:1;
};

struct ol_txrx_vdev_t {
	struct ol_txrx_pdev_t *pdev;    /* pdev - the physical device that is
					   the parent of this virtual device */
	uint8_t vdev_id;                /* ID used to specify a particular vdev
					   to the target */
	void *osif_dev;
	union ol_txrx_align_mac_addr_t mac_addr;        /* MAC address */
	/* tx paused - NO LONGER NEEDED? */
	TAILQ_ENTRY(ol_txrx_vdev_t) vdev_list_elem;     /* node in the pdev's
							   list of vdevs */
	TAILQ_HEAD(peer_list_t, ol_txrx_peer_t) peer_list;
	struct ol_txrx_peer_t *last_real_peer;  /* last real peer created for
						   this vdev (not "self"
						   pseudo-peer) */
	ol_txrx_rx_fp rx;               /* receive function used by this vdev */

	struct {
		/*
		 * If the vdev object couldn't be deleted immediately because
		 * it still had some peer objects left, remember that a delete
		 * was requested, so it can be deleted once all its peers have
		 * been deleted.
		 */
		int pending;
		/*
		 * Store a function pointer and a context argument to provide a
		 * notification for when the vdev is deleted.
		 */
		ol_txrx_vdev_delete_cb callback;
		void *context;
	} delete;

	/* safe mode control to bypass the encrypt and decipher process */
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
	struct privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t num_filters;

	enum wlan_op_mode opmode;

#ifdef QCA_IBSS_SUPPORT
	/* ibss mode related */
	int16_t ibss_peer_num;  /* the number of active peers */
	int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
#endif

	struct {
		struct {
			qdf_nbuf_t head;
			qdf_nbuf_t tail;
			int depth;
		} txq;
		uint32_t paused_reason;
		qdf_spinlock_t mutex;
		qdf_timer_t timer;
		int max_q_depth;
		bool is_q_paused;
		bool is_q_timer_on;
		uint32_t q_pause_cnt;
		uint32_t q_unpause_cnt;
		uint32_t q_overflow_cnt;
	} ll_pause;
	bool disable_intrabss_fwd;
	qdf_atomic_t os_q_paused;
	uint16_t tx_fl_lwm;
	uint16_t tx_fl_hwm;
	qdf_spinlock_t flow_control_lock;
	ol_txrx_tx_flow_control_fp osif_flow_control_cb;
	void *osif_fc_ctx;
	uint16_t wait_on_peer_id;
	qdf_event_t wait_delete_comp;
#if defined(FEATURE_TSO)
	struct {
		int pool_elems; /* total number of elements in the pool */
		int alloc_cnt;  /* number of allocated elements */
		uint32_t *freelist;     /* free list of qdf_tso_seg_elem_t */
	} tso_pool_t;
#endif

	/* last channel change event received */
	struct {
		bool is_valid;  /* whether the rest of the members are valid */
		uint16_t mhz;
		uint16_t band_center_freq1;
		uint16_t band_center_freq2;
		WLAN_PHY_MODE phy_mode;
	} ocb_channel_event;

	/* Information about the channels in the OCB schedule */
	struct ol_txrx_ocb_chan_info *ocb_channel_info;
	uint32_t ocb_channel_count;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct ol_tx_flow_pool_t *pool;
#endif
};

struct ol_rx_reorder_array_elem_t {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};

struct ol_rx_reorder_t {
	uint8_t win_sz;
	uint8_t win_sz_mask;
	uint8_t num_mpdus;
	struct ol_rx_reorder_array_elem_t *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct ol_rx_reorder_array_elem_t base;
#if defined(QCA_SUPPORT_OL_RX_REORDER_TIMEOUT)
	struct ol_rx_reorder_timeout_list_elem_t timeout;
#endif
	/* only used for defrag right now */
	TAILQ_ENTRY(ol_rx_reorder_t) defrag_waitlist_elem;
	uint32_t defrag_timeout_ms;
	/* get back to parent ol_txrx_peer_t when ol_rx_reorder_t is in a
	 * waitlist
	 */
	uint16_t tid;
};
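
/*
 * Illustrative sketch (an assumption following the power-of-two ring
 * convention implied by win_sz_mask, not the driver's exact code): an
 * incoming MPDU's sequence number selects a reorder slot via
 *
 *   idx = seq_num & rx_reorder->win_sz_mask;
 *   elem = &rx_reorder->array[idx];
 *
 * with 'base' used as a one-element array for the non-aggregation
 * (window size 1) case.
 */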

enum {
	txrx_sec_mcast = 0,
	txrx_sec_ucast
};

typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
				      tx_msdu_info);

struct ol_txrx_peer_t {
	struct ol_txrx_vdev_t *vdev;

	qdf_atomic_t ref_cnt;
	qdf_atomic_t delete_in_progress;
	qdf_atomic_t flush_in_progress;

	/* The peer state tracking is used for HL systems
	 * that don't support tx and rx filtering within the target.
	 * In such systems, the peer's state determines what kind of
	 * tx and rx filtering, if any, is done.
	 * This variable doesn't apply to LL systems, or to HL systems for
	 * which the target handles tx and rx filtering. However, it is
	 * simplest to declare and update this variable unconditionally,
	 * for all systems.
	 */
	enum ol_txrx_peer_state state;
	qdf_spinlock_t peer_info_lock;
	qdf_spinlock_t bufq_lock;
	struct list_head cached_bufq;

	ol_tx_filter_func tx_filter;

	/* peer ID(s) for this peer */
	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
	uint16_t local_id;
#endif

	union ol_txrx_align_mac_addr_t mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(ol_txrx_peer_t) hash_list_elem;

	/*
	 * per TID info -
	 * stored in separate arrays to avoid alignment padding mem overhead
	 */
	struct ol_rx_reorder_t tids_rx_reorder[OL_TXRX_NUM_EXT_TIDS];
	union htt_rx_pn_t tids_last_pn[OL_TXRX_NUM_EXT_TIDS];
	uint8_t tids_last_pn_valid[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_next_rel_idx[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_last_seq[OL_TXRX_NUM_EXT_TIDS];
	uint16_t tids_mcast_last_seq[OL_TXRX_NUM_EXT_TIDS];

	struct {
		enum htt_sec_type sec_type;
		uint32_t michael_key[2];        /* relevant for TKIP */
	} security[2];  /* 0 -> multicast, 1 -> unicast */

	/*
	 * rx proc function: this either is a copy of pdev's rx_opt_proc for
	 * regular rx processing, or has been redirected to a /dev/null discard
	 * function when peer deletion is in progress.
	 */
	void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned tid, qdf_nbuf_t msdu_list);

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
	ol_txrx_peer_stats_t stats;
#endif
	int16_t rssi_dbm;

	/* NAWDS Flag and Bss Peer bit */
	uint16_t nawds_enabled:1, bss_peer:1, valid:1;

	/* QoS info */
	uint8_t qos_capable;
	/* U-APSD tid mask */
	uint8_t uapsd_mask;
	/* flag indicating key installed */
	uint8_t keyinstalled;

	/* Bit to indicate if PN check is done in fw */
	qdf_atomic_t fw_pn_check;

#ifdef WLAN_FEATURE_11W
	/* PN counter for Robust Management Frames */
	uint64_t last_rmf_pn;
	uint32_t rmf_pn_replays;
	uint8_t last_rmf_pn_valid;
#endif

	/* Properties of the last received PPDU */
	int16_t last_pkt_rssi_cmb;
	int16_t last_pkt_rssi[4];
	uint8_t last_pkt_legacy_rate;
	uint8_t last_pkt_legacy_rate_sel;
	uint32_t last_pkt_timestamp_microsec;
	uint8_t last_pkt_timestamp_submicrosec;
	uint32_t last_pkt_tsf;
	uint8_t last_pkt_tid;
	uint16_t last_pkt_center_freq;
	qdf_time_t last_assoc_rcvd;
	qdf_time_t last_disassoc_rcvd;
	qdf_time_t last_deauth_rcvd;
};

enum ol_rx_err_type {
	OL_RX_ERR_DEFRAG_MIC,
	OL_RX_ERR_PN,
	OL_RX_ERR_UNKNOWN_PEER,
	OL_RX_ERR_MALFORMED,
	OL_RX_ERR_TKIP_MIC,
	OL_RX_ERR_DECRYPT,
	OL_RX_ERR_MPDU_LENGTH,
	OL_RX_ERR_ENCRYPT_REQUIRED,
	OL_RX_ERR_DUP,
	OL_RX_ERR_UNKNOWN,
	OL_RX_ERR_FCS,
	OL_RX_ERR_PRIVACY,
	OL_RX_ERR_NONE_FRAG,
	OL_RX_ERR_NONE = 0xFF
};

/**
 * struct ol_mic_error_info - carries the information associated with
 * a MIC error
 * @vdev_id: virtual device ID
 * @key_id: Key ID
 * @pn: packet number
 * @sa: source address
 * @da: destination address
 * @ta: transmitter address
 */
struct ol_mic_error_info {
	uint8_t vdev_id;
	uint32_t key_id;
	uint64_t pn;
	uint8_t sa[OL_TXRX_MAC_ADDR_LEN];
	uint8_t da[OL_TXRX_MAC_ADDR_LEN];
	uint8_t ta[OL_TXRX_MAC_ADDR_LEN];
};

/**
 * struct ol_error_info - carries the information associated with an
 * error indicated by the firmware
 * @mic_err: MIC error information
 */
struct ol_error_info {
	union {
		struct ol_mic_error_info mic_err;
	} u;
};
#endif /* _OL_TXRX_TYPES__H_ */