/*
 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file ol_txrx_types.h
 * @brief Define the major data types used internally by the host datapath SW.
 */
#ifndef _OL_TXRX_TYPES__H_
#define _OL_TXRX_TYPES__H_

#include <cdf_nbuf.h>           /* cdf_nbuf_t */
#include <cds_queue.h>          /* TAILQ */
#include <a_types.h>            /* A_UINT8 */
#include <htt.h>                /* htt_sec_type, htt_pkt_type, etc. */
#include <cdf_atomic.h>         /* cdf_atomic_t */
#include <wdi_event_api.h>      /* wdi_event_subscribe */
#include <cdf_softirq_timer.h>  /* cdf_softirq_timer_t */
#include <cdf_lock.h>           /* cdf_spinlock */
#include <pktlog.h>             /* ol_pktlog_dev_handle */
#include <ol_txrx_stats.h>
#include <txrx.h>
#include "ol_txrx_htt_api.h"
#include "ol_htt_tx_api.h"
#include "ol_htt_rx_api.h"
#include <ol_ctrl_txrx_api.h>
#include <ol_txrx_ctrl_api.h>


/*
 * The target may allocate multiple IDs for a peer.
 * In particular, the target may allocate one ID to represent the
 * multicast key the peer uses, and another ID to represent the
 * unicast key the peer uses.
 */
#define MAX_NUM_PEER_ID_PER_PEER 8

#define OL_TXRX_MAC_ADDR_LEN 6

/* OL_TXRX_NUM_EXT_TIDS -
 * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
 */
#define OL_TXRX_NUM_EXT_TIDS 19

#define OL_TX_NUM_QOS_TIDS 16   /* 16 regular TIDs */
#define OL_TX_NON_QOS_TID 16
#define OL_TX_MGMT_TID 17
#define OL_TX_NUM_TIDS 18
#define OL_RX_MCAST_TID 18      /* Mcast TID only between f/w & host */

#define OL_TX_VDEV_MCAST_BCAST 0        // HTT_TX_EXT_TID_MCAST_BCAST
#define OL_TX_VDEV_DEFAULT_MGMT 1       // HTT_TX_EXT_TID_DEFALT_MGMT
#define OL_TX_VDEV_NUM_QUEUES 2

#define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
#define OL_TXRX_MGMT_NUM_TYPES 8

#define OL_TX_MUTEX_TYPE cdf_spinlock_t
#define OL_RX_MUTEX_TYPE cdf_spinlock_t

/* TXRX Histogram defines */
#define TXRX_DATA_HISTROGRAM_GRANULARITY 1000
#define TXRX_DATA_HISTROGRAM_NUM_INTERVALS 100

struct ol_txrx_pdev_t;
struct ol_txrx_vdev_t;
struct ol_txrx_peer_t;

struct ol_pdev_t;
typedef struct ol_pdev_t *ol_pdev_handle;

struct ol_vdev_t;
typedef struct ol_vdev_t *ol_vdev_handle;

struct ol_peer_t;
typedef struct ol_peer_t *ol_peer_handle;

/* rx filter related */
#define MAX_PRIVACY_FILTERS 4   /* max privacy filters */

enum privacy_filter {
        PRIVACY_FILTER_ALWAYS,
        PRIVACY_FILTER_KEY_UNAVAILABLE,
};

enum privacy_filter_packet_type {
        PRIVACY_FILTER_PACKET_UNICAST,
        PRIVACY_FILTER_PACKET_MULTICAST,
        PRIVACY_FILTER_PACKET_BOTH
};

struct privacy_exemption {
        /* ethertype -
         * type of ethernet frames this filter applies to, in host byte order
         */
        uint16_t ether_type;
        enum privacy_filter filter_type;
        enum privacy_filter_packet_type packet_type;
};
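
/*
 * Illustrative example (not taken from the driver itself): a privacy
 * exemption entry that lets EAPOL key handshake frames through before the
 * pairwise key is installed could be filled in as
 *
 *     struct privacy_exemption exempt = {
 *         .ether_type  = 0x888e,  // EAPOL ethertype, host byte order
 *         .filter_type = PRIVACY_FILTER_KEY_UNAVAILABLE,
 *         .packet_type = PRIVACY_FILTER_PACKET_UNICAST,
 *     };
 *
 * The ether_type value and the filter/packet type chosen here only show
 * how the fields relate; they are not a statement of the policy the
 * control SW actually programs.
 */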

enum ol_tx_frm_type {
        ol_tx_frm_std = 0, /* regular frame - no added header fragments */
        ol_tx_frm_tso, /* TSO segment, with a modified IP header added */
        ol_tx_frm_audio, /* audio frames, with a custom LLC/SNAP hdr added */
        ol_tx_frm_no_free, /* frame requires special tx completion callback */
};

struct ol_tx_desc_t {
        cdf_nbuf_t netbuf;
        void *htt_tx_desc;
#ifdef WLAN_FEATURE_FASTPATH
        uint16_t id;
#endif /* WLAN_FEATURE_FASTPATH */
        uint32_t htt_tx_desc_paddr;
#if defined(HELIUMPLUS_PADDR64)
        void *htt_frag_desc; /* struct msdu_ext_desc_t * */
        uint32_t htt_frag_desc_paddr;
#endif /* defined(HELIUMPLUS_PADDR64) */
        uint32_t index;
        cdf_atomic_t ref_cnt;
        enum htt_tx_status status;

#ifdef QCA_COMPUTE_TX_DELAY
        uint32_t entry_timestamp_ticks;
#endif
        /*
         * Allow tx descriptors to be stored in (doubly-linked) lists.
         * This is mainly used for HL tx queuing and scheduling, but is
         * also used by LL+HL for batch processing of tx frames.
         */
        TAILQ_ENTRY(ol_tx_desc_t) tx_desc_list_elem;

        /*
         * Remember whether the tx frame is a regular packet, or whether
         * the driver added extra header fragments (e.g. a modified IP header
         * for TSO fragments, or an added LLC/SNAP header for audio interworking
         * data) that need to be handled in a special manner.
         * This field is filled in with the ol_tx_frm_type enum.
         */
        uint8_t pkt_type;
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
        /* used by tx encap, to restore the os buf start offset
           after tx complete */
        uint8_t orig_l2_hdr_bytes;
#endif
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
        struct ol_txrx_vdev_t *vdev;
#endif
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
        struct ol_tx_flow_pool_t *pool;
#endif
        void *tso_desc;
};

typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;

union ol_tx_desc_list_elem_t {
        union ol_tx_desc_list_elem_t *next;
        struct ol_tx_desc_t tx_desc;
};
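
/*
 * The elements above double as free-list links: while a tx descriptor is
 * on a freelist, its storage is reused for the 'next' pointer. A minimal
 * illustrative sketch of popping one descriptor from the pdev-level
 * freelist declared later in this file (not the driver's actual
 * allocation routine):
 *
 *     union ol_tx_desc_list_elem_t *elem = pdev->tx_desc.freelist;
 *     struct ol_tx_desc_t *tx_desc = NULL;
 *     if (elem) {
 *         pdev->tx_desc.freelist = elem->next;  // unlink the free element
 *         tx_desc = &elem->tx_desc;             // reuse it as a descriptor
 *     }
 */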

union ol_txrx_align_mac_addr_t {
        uint8_t raw[OL_TXRX_MAC_ADDR_LEN];
        struct {
                uint16_t bytes_ab;
                uint16_t bytes_cd;
                uint16_t bytes_ef;
        } align2;
        struct {
                uint32_t bytes_abcd;
                uint16_t bytes_ef;
        } align4;
};
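
/*
 * Illustrative use of the union above (assuming the MAC address storage is
 * at least 4-byte aligned): the align4 view lets a full address comparison
 * be done with one 32-bit and one 16-bit compare instead of six byte
 * compares, e.g.
 *
 *     if (a->align4.bytes_abcd == b->align4.bytes_abcd &&
 *         a->align4.bytes_ef == b->align4.bytes_ef)
 *         ;  // addresses match
 */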

struct ol_rx_reorder_timeout_list_elem_t {
        TAILQ_ENTRY(ol_rx_reorder_timeout_list_elem_t)
                reorder_timeout_list_elem;
        uint32_t timestamp_ms;
        struct ol_txrx_peer_t *peer;
        uint8_t tid;
        uint8_t active;
};

#define TXRX_TID_TO_WMM_AC(_tid) ( \
                (((_tid) >> 1) == 3) ? TXRX_WMM_AC_VO : \
                (((_tid) >> 1) == 2) ? TXRX_WMM_AC_VI : \
                (((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
                TXRX_WMM_AC_BE)
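
/*
 * Resulting mapping, for reference (this is the standard 802.11 UP-to-AC
 * mapping and is derivable directly from the macro above):
 *   TIDs 0, 3 -> TXRX_WMM_AC_BE
 *   TIDs 1, 2 -> TXRX_WMM_AC_BK
 *   TIDs 4, 5 -> TXRX_WMM_AC_VI
 *   TIDs 6, 7 -> TXRX_WMM_AC_VO
 */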

struct ol_tx_reorder_cat_timeout_t {
        TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
        cdf_softirq_timer_t timer;
        uint32_t duration_ms;
        struct ol_txrx_pdev_t *pdev;
};

enum ol_tx_queue_status {
        ol_tx_queue_empty = 0,
        ol_tx_queue_active,
        ol_tx_queue_paused,
};

struct ol_txrx_msdu_info_t {
        struct htt_msdu_info_t htt;
        struct ol_txrx_peer_t *peer;
        struct cdf_tso_info_t tso_info;
};

enum {
        ol_tx_aggr_untried = 0,
        ol_tx_aggr_enabled,
        ol_tx_aggr_disabled,
        ol_tx_aggr_retry,
        ol_tx_aggr_in_progress,
};

struct ol_tx_frms_queue_t {
        /* list_elem -
         * Allow individual tx frame queues to be linked together into
         * scheduler queues of tx frame queues
         */
        TAILQ_ENTRY(ol_tx_frms_queue_t) list_elem;
        uint8_t aggr_state;
        struct {
                uint8_t total;
                /* pause requested by ctrl SW rather than txrx SW */
                uint8_t by_ctrl;
        } paused_count;
        uint8_t ext_tid;
        uint16_t frms;
        uint32_t bytes;
        ol_tx_desc_list head;
        enum ol_tx_queue_status flag;
};

enum {
        ol_tx_log_entry_type_invalid,
        ol_tx_log_entry_type_queue_state,
        ol_tx_log_entry_type_enqueue,
        ol_tx_log_entry_type_dequeue,
        ol_tx_log_entry_type_drop,
        ol_tx_log_entry_type_queue_free,

        ol_tx_log_entry_type_wrap,
};

struct ol_tx_log_queue_state_var_sz_t {
        uint32_t active_bitmap;
        uint16_t credit;
        uint8_t num_cats_active;
        uint8_t data[1];
};

struct ol_tx_log_queue_add_t {
        uint8_t num_frms;
        uint8_t tid;
        uint16_t peer_id;
        uint16_t num_bytes;
};

struct ol_mac_addr {
        uint8_t mac_addr[OL_TXRX_MAC_ADDR_LEN];
};

#ifndef OL_TXRX_NUM_LOCAL_PEER_IDS
#define OL_TXRX_NUM_LOCAL_PEER_IDS 33   /* default */
#endif

#ifndef ol_txrx_local_peer_id_t
#define ol_txrx_local_peer_id_t uint8_t /* default */
#endif

#ifdef QCA_COMPUTE_TX_DELAY
/*
 * Delay histogram bins: 16 bins of 10 ms each to count delays
 * from 0-160 ms, plus one overflow bin for delays > 160 ms.
 */
#define QCA_TX_DELAY_HIST_INTERNAL_BINS 17
#define QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS 10
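
/*
 * Illustrative example of the binning described above: with 10 ms wide
 * bins, a queuing delay of 37 ms is counted in bin 3 (the 30-39 ms bin),
 * while any delay of 160 ms or more lands in the final overflow bin
 * (index 16).
 */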

struct ol_tx_delay_data {
        struct {
                uint64_t transmit_sum_ticks;
                uint64_t queue_sum_ticks;
                uint32_t transmit_num;
                uint32_t queue_num;
        } avgs;
        uint16_t hist_bins_queue[QCA_TX_DELAY_HIST_INTERNAL_BINS];
};

#endif /* QCA_COMPUTE_TX_DELAY */

/* Thermal Mitigation */

enum throttle_level {
        THROTTLE_LEVEL_0,
        THROTTLE_LEVEL_1,
        THROTTLE_LEVEL_2,
        THROTTLE_LEVEL_3,
        /* Invalid */
        THROTTLE_LEVEL_MAX,
};

enum throttle_phase {
        THROTTLE_PHASE_OFF,
        THROTTLE_PHASE_ON,
        /* Invalid */
        THROTTLE_PHASE_MAX,
};

#define THROTTLE_TX_THRESHOLD (100)

typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2

/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED: pool is active (can take/put descriptors)
 *                             and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *                           and network queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
        FLOW_POOL_ACTIVE_UNPAUSED = 0,
        FLOW_POOL_ACTIVE_PAUSED = 1,
        FLOW_POOL_INVALID = 2,
        FLOW_POOL_INACTIVE = 3,
};

/**
 * struct ol_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
 * @pkt_drop_no_desc: packets dropped due to unavailability of descriptors
 */
struct ol_txrx_pool_stats {
        uint16_t pool_map_count;
        uint16_t pool_unmap_count;
        uint16_t pkt_drop_no_pool;
        uint16_t pkt_drop_no_desc;
};

/**
 * struct ol_tx_flow_pool_t - flow_pool info
 * @flow_pool_list_elem: flow_pool_list element
 * @flow_pool_lock: flow_pool lock
 * @flow_pool_id: flow_pool id
 * @flow_pool_size: flow_pool size
 * @avail_desc: available descriptors
 * @deficient_desc: deficient descriptors
 * @status: flow pool status
 * @flow_type: flow pool type
 * @member_flow_id: member flow id
 * @stop_th: stop threshold
 * @start_th: start threshold
 * @freelist: tx descriptor freelist
 */
struct ol_tx_flow_pool_t {
        TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
        cdf_spinlock_t flow_pool_lock;
        uint8_t flow_pool_id;
        uint16_t flow_pool_size;
        uint16_t avail_desc;
        uint16_t deficient_desc;
        enum flow_pool_status status;
        enum htt_flow_type flow_type;
        uint8_t member_flow_id;
        uint16_t stop_th;
        uint16_t start_th;
        union ol_tx_desc_list_elem_t *freelist;
};
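
/*
 * Typical threshold usage (an illustration of how the fields relate, not a
 * statement of the exact policy implemented elsewhere in the driver): when
 * avail_desc falls to or below stop_th, the pool is moved to
 * FLOW_POOL_ACTIVE_PAUSED and the corresponding network queues are paused;
 * once tx completions bring avail_desc back up to start_th or above, the
 * pool returns to FLOW_POOL_ACTIVE_UNPAUSED and the queues are resumed.
 */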

#endif

/*
 * As depicted in the diagram below, the pdev contains an array of
 * NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
 * Each element identifies all the tx queues that are active for
 * the TID, from the different peers.
 *
 * Each peer contains an array of NUM_EXT_TID ol_tx_frms_queue_t elements.
 * Each element identifies the tx frames for the TID that need to be sent
 * to the peer.
 *
 *
 *  pdev: ol_tx_active_queues_in_tid_t active_in_tids[NUM_EXT_TIDS]
 *                                 TID
 *       0            1            2                     17
 *  +============+============+============+==     ==+============+
 *  | active (y) | active (n) | active (n) |         | active (y) |
 *  |------------+------------+------------+--     --+------------|
 *  | queues     | queues     | queues     |         | queues     |
 *  +============+============+============+==     ==+============+
 *       |                                                |
 *    .--+------------------------------------------------'
 *    |  |
 *    |  |     peer X:                            peer Y:
 *    |  |     ol_tx_frms_queue_t                 ol_tx_frms_queue_t
 *    |  |     tx_queues[NUM_EXT_TIDS]            tx_queues[NUM_EXT_TIDS]
 *    |  | TID +======+                       TID +======+
 *    |  `---->| next |-------------------------->| next |--X
 *    |     0  | prev |   .------.   .------.  0  | prev |   .------.
 *    |        | txq  |-->|txdesc|-->|txdesc|     | txq  |-->|txdesc|
 *    |        +======+   `------'   `------'     +======+   `------'
 *    |        | next |      |          |      1  | next |      |
 *    |     1  | prev |      v          v         | prev |      v
 *    |        | txq  |   .------.   .------.     | txq  |   .------.
 *    |        +======+   |netbuf|   |netbuf|     +======+   |netbuf|
 *    |        | next |   `------'   `------'     | next |   `------'
 *    |     2  | prev |                        2  | prev |
 *    |        | txq  |                           | txq  |
 *    |        +======+                           +======+
 *    |        |      |                           |      |
 *    |
 *    |
 *    |        |      |                           |      |
 *    |        +======+                           +======+
 *    `------->| next |--X                        | next |
 *          17 | prev |   .------.             17 | prev |
 *             | txq  |-->|txdesc|                | txq  |
 *             +======+   `------'                +======+
 *                           |
 *                           v
 *                        .------.
 *                        |netbuf|
 *                        `------'
 */
struct ol_txrx_pdev_t {
        /* ctrl_pdev - handle for querying config info */
        ol_pdev_handle ctrl_pdev;

        /* osdev - handle for mem alloc / free, map / unmap */
        cdf_device_t osdev;

        htt_pdev_handle htt_pdev;

#ifdef WLAN_FEATURE_FASTPATH
        struct CE_handle *ce_tx_hdl; /* Handle to Tx packet posting CE */
        struct CE_handle *ce_htt_msg_hdl; /* Handle to TxRx completion CE */
#endif /* WLAN_FEATURE_FASTPATH */

        struct {
                int is_high_latency;
                int host_addba;
                int ll_pause_txq_limit;
                int default_tx_comp_req;
        } cfg;

        /* WDI subscriber's event list */
        wdi_event_subscribe **wdi_event_list;

#ifndef REMOVE_PKT_LOG
        /* Pktlog pdev */
        struct ol_pktlog_dev_t *pl_dev;
#endif /* #ifndef REMOVE_PKT_LOG */

        enum ol_sec_type sec_types[htt_num_sec_types];
        /* standard frame type */
        enum wlan_frm_fmt frame_format;
        enum htt_pkt_type htt_pkt_type;

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
        /* txrx encap/decap */
        uint8_t sw_tx_encap;
        uint8_t sw_rx_decap;
        uint8_t target_tx_tran_caps;
        uint8_t target_rx_tran_caps;
        /* llc process */
        uint8_t sw_tx_llc_proc_enable;
        uint8_t sw_rx_llc_proc_enable;
        /* A-MSDU */
        uint8_t sw_subfrm_hdr_recovery_enable;
        /* Protected Frame bit handling */
        uint8_t sw_pf_proc_enable;
#endif
        /*
         * target tx credit -
         * not needed for LL, but used for HL download scheduler to keep
         * track of roughly how much space is available in the target for
         * tx frames
         */
        cdf_atomic_t target_tx_credit;
        cdf_atomic_t orig_target_tx_credit;

        /* Peer mac address to staid mapping */
        struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];

        /* ol_txrx_vdev list */
        TAILQ_HEAD(, ol_txrx_vdev_t) vdev_list;

        /* peer ID to peer object map (array of pointers to peer objects) */
        struct ol_txrx_peer_t **peer_id_to_obj_map;

        struct {
                unsigned mask;
                unsigned idx_bits;
                TAILQ_HEAD(, ol_txrx_peer_t) *bins;
        } peer_hash;

        /* rx specific processing */
        struct {
                struct {
                        TAILQ_HEAD(, ol_rx_reorder_t) waitlist;
                        uint32_t timeout_ms;
                } defrag;
                struct {
                        int defrag_timeout_check;
                        int dup_check;
                } flags;

                struct {
                        struct ol_tx_reorder_cat_timeout_t
                                access_cats[TXRX_NUM_WMM_AC];
                } reorder_timeout;
                cdf_spinlock_t mutex;
        } rx;

        /* rx proc function */
        void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
                            struct ol_txrx_peer_t *peer,
                            unsigned tid, cdf_nbuf_t msdu_list);

        /* tx data delivery notification callback function */
        struct {
                ol_txrx_data_tx_cb func;
                void *ctxt;
        } tx_data_callback;

        /* tx management delivery notification callback functions */
        struct {
                struct {
                        ol_txrx_mgmt_tx_cb download_cb;
                        ol_txrx_mgmt_tx_cb ota_ack_cb;
                        void *ctxt;
                } callbacks[OL_TXRX_MGMT_NUM_TYPES];
        } tx_mgmt;

        struct {
                uint16_t pool_size;
                uint16_t num_free;
                union ol_tx_desc_list_elem_t *array;
                union ol_tx_desc_list_elem_t *freelist;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
                uint8_t num_invalid_bin;
                cdf_spinlock_t flow_pool_list_lock;
                TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
#endif
        } tx_desc;

#if defined(QCA_LL_TX_FLOW_CONTROL_V2)
        struct ol_txrx_pool_stats pool_stats;
        uint32_t num_msdu_desc;
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
        struct ol_tx_flow_pool_t *mgmt_pool;
#endif
#endif

        struct {
                int (*cmp)(union htt_rx_pn_t *new,
                           union htt_rx_pn_t *old,
                           int is_unicast, int opmode);
                int len;
        } rx_pn[htt_num_sec_types];

        /* tx mutex */
        OL_TX_MUTEX_TYPE tx_mutex;

        /*
         * peer ref mutex:
         * 1. Protect peer object lookups until the returned peer object's
         *    reference count is incremented.
         * 2. Provide mutex when accessing peer object lookup structures.
         */
        OL_RX_MUTEX_TYPE peer_ref_mutex;

        /*
         * last_real_peer_mutex:
         * Protect lookups of any vdev's last_real_peer pointer until the
         * reference count for the pointed-to peer object is incremented.
         * This mutex could be in the vdev struct, but it's slightly simpler
         * to have a single lock in the pdev struct. Since the lock is only
         * held for an extremely short time, and since it's very unlikely for
         * two vdev's to concurrently access the lock, there's no real
         * benefit to having a per-vdev lock.
         */
        OL_RX_MUTEX_TYPE last_real_peer_mutex;

        struct {
                struct {
                        struct {
                                struct {
                                        uint64_t ppdus;
                                        uint64_t mpdus;
                                } normal;
                                struct {
                                        /*
                                         * mpdu_bad is general -
                                         * replace it with the specific counters
                                         * below
                                         */
                                        uint64_t mpdu_bad;
                                        /* uint64_t mpdu_fcs; */
                                        /* uint64_t mpdu_duplicate; */
                                        /* uint64_t mpdu_pn_replay; */
                                        /* uint64_t mpdu_bad_sender; */
                                        /* ^ comment: peer not found */
                                        /* uint64_t mpdu_flushed; */
                                        /* uint64_t msdu_defrag_mic_err; */
                                        uint64_t msdu_mc_dup_drop;
                                } err;
                        } rx;
                } priv;
                struct ol_txrx_stats pub;
        } stats;

#if defined(ENABLE_RX_REORDER_TRACE)
        struct {
                uint32_t mask;
                uint32_t idx;
                uint64_t cnt;
#define TXRX_RX_REORDER_TRACE_SIZE_LOG2 8       /* 256 entries */
                struct {
                        uint16_t reorder_idx;
                        uint16_t seq_num;
                        uint8_t num_mpdus;
                        uint8_t tid;
                } *data;
        } rx_reorder_trace;
#endif /* ENABLE_RX_REORDER_TRACE */

#if defined(ENABLE_RX_PN_TRACE)
        struct {
                uint32_t mask;
                uint32_t idx;
                uint64_t cnt;
#define TXRX_RX_PN_TRACE_SIZE_LOG2 5    /* 32 entries */
                struct {
                        struct ol_txrx_peer_t *peer;
                        uint32_t pn32;
                        uint16_t seq_num;
                        uint8_t unicast;
                        uint8_t tid;
                } *data;
        } rx_pn_trace;
#endif /* ENABLE_RX_PN_TRACE */

#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
        bool host_80211_enable;
#endif

        /*
         * tx_queue only applies for HL, but is defined unconditionally to avoid
         * wrapping references to tx_queue in "defined(CONFIG_HL_SUPPORT)"
         * conditional compilation.
         */
        struct {
                cdf_atomic_t rsrc_cnt;
                /* threshold_lo - when to start tx desc margin replenishment */
                uint16_t rsrc_threshold_lo;
                /* threshold_hi - where to stop during tx desc margin
                   replenishment */
                uint16_t rsrc_threshold_hi;
        } tx_queue;

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
        cdf_spinlock_t peer_stat_mutex;
#endif

        int rssi_update_shift;
        int rssi_new_weight;
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
        struct {
                ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
                ol_txrx_local_peer_id_t freelist;
                cdf_spinlock_t lock;
                ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
        } local_peer_ids;
#endif

#ifdef QCA_COMPUTE_TX_DELAY
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
#define QCA_TX_DELAY_NUM_CATEGORIES \
        (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
#else
#define QCA_TX_DELAY_NUM_CATEGORIES 1
#endif
        struct {
                cdf_spinlock_t mutex;
                struct {
                        struct ol_tx_delay_data copies[2]; /* ping-pong */
                        int in_progress_idx;
                        uint32_t avg_start_time_ticks;
                } cats[QCA_TX_DELAY_NUM_CATEGORIES];
                uint32_t tx_compl_timestamp_ticks;
                uint32_t avg_period_ticks;
                uint32_t hist_internal_bin_width_mult;
                uint32_t hist_internal_bin_width_shift;
        } tx_delay;
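
        /*
         * Note (an assumption about intent, not taken from this file): the
         * hist_internal_bin_width_mult/shift pair above presumably lets the
         * delay-to-bin conversion be done with a multiply and a right shift,
         * e.g. bin = (delay_ticks * mult) >> shift, avoiding a division in
         * the tx completion path.
         */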

        uint16_t packet_count[QCA_TX_DELAY_NUM_CATEGORIES];
        uint16_t packet_loss_count[QCA_TX_DELAY_NUM_CATEGORIES];

#endif /* QCA_COMPUTE_TX_DELAY */

        struct {
                cdf_spinlock_t mutex;
                /* timer used to monitor the throttle "on" phase and
                   "off" phase */
                cdf_softirq_timer_t phase_timer;
                /* timer used to send tx frames */
                cdf_softirq_timer_t tx_timer;
                /* This is the time in ms of the throttling window; it
                 * includes an "on" phase and an "off" phase */
                uint32_t throttle_period_ms;
                /* Current throttle level set by the client, e.g. level 0,
                 * level 1, etc. */
                enum throttle_level current_throttle_level;
                /* Index that points to the phase within the throttle period */
                enum throttle_phase current_throttle_phase;
                /* Maximum number of frames to send to the target at one time */
                uint32_t tx_threshold;
                /* stores time in ms of on/off phase for each throttle level */
                int throttle_time_ms[THROTTLE_LEVEL_MAX][THROTTLE_PHASE_MAX];
                /* mark true if traffic is paused due to thermal throttling */
                bool is_paused;
        } tx_throttle;

#ifdef IPA_OFFLOAD
        ipa_uc_op_cb_type ipa_uc_op_cb;
        void *osif_dev;
#endif /* IPA_OFFLOAD */

#if defined(FEATURE_TSO)
        struct {
                uint16_t pool_size;
                uint16_t num_free;
                struct cdf_tso_seg_elem_t *array;
                struct cdf_tso_seg_elem_t *freelist;
                /* tso mutex */
                OL_TX_MUTEX_TYPE tso_mutex;
        } tso_seg_pool;
#endif
        uint8_t ocb_peer_valid;
        struct ol_txrx_peer_t *ocb_peer;
        ol_tx_pause_callback_fp pause_cb;

        struct {
                void *lro_data;
                void (*lro_flush_cb)(void *);
        } lro_info;
};

struct ol_txrx_ocb_chan_info {
        uint32_t chan_freq;
        uint16_t disable_rx_stats_hdr:1;
};

struct ol_txrx_vdev_t {
        struct ol_txrx_pdev_t *pdev; /* pdev - the physical device that is
                                        the parent of this virtual device */
        uint8_t vdev_id;             /* ID used to specify a particular vdev
                                        to the target */
        void *osif_dev;
        union ol_txrx_align_mac_addr_t mac_addr; /* MAC address */
        /* tx paused - NO LONGER NEEDED? */
        TAILQ_ENTRY(ol_txrx_vdev_t) vdev_list_elem; /* node in the pdev's list
                                                       of vdevs */
        TAILQ_HEAD(peer_list_t, ol_txrx_peer_t) peer_list;
        struct ol_txrx_peer_t *last_real_peer; /* last real peer created for
                                                  this vdev (not "self"
                                                  pseudo-peer) */
        ol_txrx_tx_fp tx; /* transmit function used by this vdev */

        struct {
                /*
                 * If the vdev object couldn't be deleted immediately because
                 * it still had some peer objects left, remember that a delete
                 * was requested, so it can be deleted once all its peers have
                 * been deleted.
                 */
                int pending;
                /*
                 * Store a function pointer and a context argument to provide a
                 * notification for when the vdev is deleted.
                 */
                ol_txrx_vdev_delete_cb callback;
                void *context;
        } delete;

        /* safe mode control to bypass the encrypt and decipher process */
        uint32_t safemode;

        /* rx filter related */
        uint32_t drop_unenc;
        struct privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
        uint32_t num_filters;

        enum wlan_op_mode opmode;

#ifdef QCA_IBSS_SUPPORT
        /* ibss mode related */
        int16_t ibss_peer_num; /* the number of active peers */
        int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
#endif

        struct {
                struct {
                        cdf_nbuf_t head;
                        cdf_nbuf_t tail;
                        int depth;
                } txq;
                uint32_t paused_reason;
                cdf_spinlock_t mutex;
                cdf_softirq_timer_t timer;
                int max_q_depth;
                bool is_q_paused;
                bool is_q_timer_on;
                uint32_t q_pause_cnt;
                uint32_t q_unpause_cnt;
                uint32_t q_overflow_cnt;
        } ll_pause;
        bool disable_intrabss_fwd;
        cdf_atomic_t os_q_paused;
        uint16_t tx_fl_lwm;
        uint16_t tx_fl_hwm;
        cdf_spinlock_t flow_control_lock;
        ol_txrx_tx_flow_control_fp osif_flow_control_cb;
        void *osif_fc_ctx;

#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
        cdf_atomic_t tx_desc_count;
#endif
        uint16_t wait_on_peer_id;
        cdf_event_t wait_delete_comp;
#if defined(FEATURE_TSO)
        struct {
                int pool_elems; /* total number of elements in the pool */
                int alloc_cnt; /* number of allocated elements */
                uint32_t *freelist; /* free list of cdf_tso_seg_elem_t */
        } tso_pool_t;
#endif

        /* last channel change event received */
        struct {
                bool is_valid; /* whether the rest of the members are valid */
                uint16_t mhz;
                uint16_t band_center_freq1;
                uint16_t band_center_freq2;
                WLAN_PHY_MODE phy_mode;
        } ocb_channel_event;

        /* Information about the channels in the OCB schedule */
        struct ol_txrx_ocb_chan_info *ocb_channel_info;
        uint32_t ocb_channel_count;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
        struct ol_tx_flow_pool_t *pool;
#endif
};

struct ol_rx_reorder_array_elem_t {
        cdf_nbuf_t head;
        cdf_nbuf_t tail;
};

struct ol_rx_reorder_t {
        uint8_t win_sz;
        uint8_t win_sz_mask;
        uint8_t num_mpdus;
        struct ol_rx_reorder_array_elem_t *array;
        /* base - single rx reorder element used for non-aggr cases */
        struct ol_rx_reorder_array_elem_t base;
#if defined(QCA_SUPPORT_OL_RX_REORDER_TIMEOUT)
        struct ol_rx_reorder_timeout_list_elem_t timeout;
#endif
        /* only used for defrag right now */
        TAILQ_ENTRY(ol_rx_reorder_t) defrag_waitlist_elem;
        uint32_t defrag_timeout_ms;
        /* get back to parent ol_txrx_peer_t when ol_rx_reorder_t is in a
         * waitlist */
        uint16_t tid;
};
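
/*
 * Illustrative note on struct ol_rx_reorder_t above (typical usage for a
 * power-of-two reorder window, not a guarantee of the exact indexing the
 * rx reorder code uses): with win_sz_mask = win_sz - 1, an incoming MPDU's
 * slot can be computed as
 *
 *     idx = seq_num & rx_reorder->win_sz_mask;
 *     elem = &rx_reorder->array[idx];
 */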

enum {
        txrx_sec_mcast = 0,
        txrx_sec_ucast
};

typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
                                      tx_msdu_info);

struct ol_txrx_peer_t {
        struct ol_txrx_vdev_t *vdev;

        cdf_atomic_t ref_cnt;
        cdf_atomic_t delete_in_progress;
        cdf_atomic_t flush_in_progress;

        /* The peer state tracking is used for HL systems
         * that don't support tx and rx filtering within the target.
         * In such systems, the peer's state determines what kind of
         * tx and rx filtering, if any, is done.
         * This variable doesn't apply to LL systems, or to HL systems for
         * which the target handles tx and rx filtering. However, it is
         * simplest to declare and update this variable unconditionally,
         * for all systems.
         */
        enum ol_txrx_peer_state state;
        cdf_spinlock_t peer_info_lock;
        ol_rx_callback_fp osif_rx;
        cdf_spinlock_t bufq_lock;
        struct list_head cached_bufq;

        ol_tx_filter_func tx_filter;

        /* peer ID(s) for this peer */
        uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
        uint16_t local_id;
#endif

        union ol_txrx_align_mac_addr_t mac_addr;

        /* node in the vdev's list of peers */
        TAILQ_ENTRY(ol_txrx_peer_t) peer_list_elem;
        /* node in the hash table bin's list of peers */
        TAILQ_ENTRY(ol_txrx_peer_t) hash_list_elem;

        /*
         * per TID info -
         * stored in separate arrays to avoid alignment padding mem overhead
         */
        struct ol_rx_reorder_t tids_rx_reorder[OL_TXRX_NUM_EXT_TIDS];
        union htt_rx_pn_t tids_last_pn[OL_TXRX_NUM_EXT_TIDS];
        uint8_t tids_last_pn_valid[OL_TXRX_NUM_EXT_TIDS];
        uint16_t tids_next_rel_idx[OL_TXRX_NUM_EXT_TIDS];
        uint16_t tids_last_seq[OL_TXRX_NUM_EXT_TIDS];
        uint16_t tids_mcast_last_seq[OL_TXRX_NUM_EXT_TIDS];

        struct {
                enum htt_sec_type sec_type;
                uint32_t michael_key[2]; /* relevant for TKIP */
        } security[2]; /* 0 -> multicast, 1 -> unicast */
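
        /*
         * Illustrative accessor (an example of how the indices relate, not
         * a quote from the rx code): the txrx_sec_mcast / txrx_sec_ucast
         * values defined earlier in this file can be used to index this
         * array, e.g.
         *
         *     enum htt_sec_type t = peer->security[txrx_sec_ucast].sec_type;
         */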

        /*
         * rx proc function: this either is a copy of pdev's rx_opt_proc for
         * regular rx processing, or has been redirected to a /dev/null discard
         * function when peer deletion is in progress.
         */
        void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
                            struct ol_txrx_peer_t *peer,
                            unsigned tid, cdf_nbuf_t msdu_list);

#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
        ol_txrx_peer_stats_t stats;
#endif
        int16_t rssi_dbm;

        /* NAWDS Flag and Bss Peer bit */
        uint16_t nawds_enabled:1, bss_peer:1, valid:1;

        /* QoS info */
        uint8_t qos_capable;
        /* U-APSD tid mask */
        uint8_t uapsd_mask;
        /* flag indicating key installed */
        uint8_t keyinstalled;

        /* Bit to indicate if PN check is done in fw */
        cdf_atomic_t fw_pn_check;

#ifdef WLAN_FEATURE_11W
        /* PN counter for Robust Management Frames */
        uint64_t last_rmf_pn;
        uint32_t rmf_pn_replays;
        uint8_t last_rmf_pn_valid;
#endif

        /* Properties of the last received PPDU */
        int16_t last_pkt_rssi_cmb;
        int16_t last_pkt_rssi[4];
        uint8_t last_pkt_legacy_rate;
        uint8_t last_pkt_legacy_rate_sel;
        uint32_t last_pkt_timestamp_microsec;
        uint8_t last_pkt_timestamp_submicrosec;
        uint32_t last_pkt_tsf;
        uint8_t last_pkt_tid;
        uint16_t last_pkt_center_freq;
};

#endif /* _OL_TXRX_TYPES__H_ */