Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
Venkata Sharath Chandra Manchala | 2996517 | 2018-01-18 14:17:29 -0800 | [diff] [blame] | 2 | * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3 | * |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 16 | * PERFORMANCE OF THIS SOFTWARE. |
| 17 | */ |
| 18 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 19 | #ifndef _OL_TXRX__H_ |
| 20 | #define _OL_TXRX__H_ |
| 21 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 22 | #include <qdf_nbuf.h> /* qdf_nbuf_t */ |
Dhanashri Atre | 12a0839 | 2016-02-17 13:10:34 -0800 | [diff] [blame] | 23 | #include <cdp_txrx_cmn.h> /* ol_txrx_vdev_t, etc. */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 24 | #include "cds_sched.h" |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 25 | #include <cdp_txrx_handle.h> |
Poddar, Siddarth | 8e3ee2d | 2016-11-29 20:17:01 +0530 | [diff] [blame] | 26 | #include <ol_txrx_types.h> |
| 27 | /* |
| 28 | * Pool of tx descriptors reserved for |
| 29 | * high-priority traffic, such as ARP/EAPOL etc |
| 30 | * only for forwarding path. |
| 31 | */ |
| 32 | #define OL_TX_NON_FWD_RESERVE 100 |
| 33 | |
/**
 * ol_txrx_peer_get_ref_by_addr() - look up a peer by MAC address and
 *	take a debug-id-tracked reference on it
 * @pdev: physical device whose peer list is searched
 * @peer_addr: MAC address of the peer to look up
 * @peer_id: [out] filled with the peer's local id on success
 *	(NOTE(review): out-param semantics inferred from the matching
 *	ol_txrx_find_peer_by_addr() signature — confirm in the definition)
 * @dbg_id: debug id recorded with the reference; pass the same id to
 *	ol_txrx_peer_release_ref() when done
 *
 * Return: peer handle with one reference held, or NULL if no peer with
 *	@peer_addr exists (NOTE(review): confirm NULL-on-miss in definition)
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
						 u8 *peer_addr,
						 u8 *peer_id,
						 enum peer_debug_id_type
						 dbg_id);
Mohit Khanna | b04dfcd | 2017-02-13 18:54:35 -0800 | [diff] [blame] | 39 | |
/**
 * ol_txrx_peer_release_ref() - release a reference previously taken with
 *	ol_txrx_peer_get_ref_by_addr() or another ref-taking variant
 * @peer: peer whose reference is to be dropped
 * @dbg_id: debug id the reference was originally taken with
 *
 * Return: presumably the remaining reference count or an error code —
 *	confirm against the definition
 */
int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
			     enum peer_debug_id_type dbg_id);
/**
 * ol_txrx_is_peer_eligible_for_deletion() - check if peer is to be deleted
 * @peer: peer handle
 * @pdev: pointer to pdev
 *
 * Return: true if eligible for deletion else false
 */
bool ol_txrx_is_peer_eligible_for_deletion(ol_txrx_peer_handle peer,
					   struct ol_txrx_pdev_t *pdev);
Mohit Khanna | b7bec72 | 2017-11-10 11:43:44 -0800 | [diff] [blame] | 50 | |
/**
 * ol_tx_desc_pool_size_hl() - compute the tx descriptor pool size to
 *	allocate for HL (high-latency) systems
 * @ctrl_pdev: the control pdev handle
 *
 * Return: pool size, clamped by the OL_TX_DESC_POOL_SIZE_MIN_HL /
 *	OL_TX_DESC_POOL_SIZE_MAX_HL bounds below — NOTE(review): the
 *	clamping is inferred from the macro names; confirm in the definition
 */
u_int16_t
ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev);
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 59 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 60 | #ifndef OL_TX_AVG_FRM_BYTES |
| 61 | #define OL_TX_AVG_FRM_BYTES 1000 |
| 62 | #endif |
| 63 | |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 64 | #ifndef OL_TX_DESC_POOL_SIZE_MIN_HL |
| 65 | #define OL_TX_DESC_POOL_SIZE_MIN_HL 500 |
| 66 | #endif |
| 67 | |
| 68 | #ifndef OL_TX_DESC_POOL_SIZE_MAX_HL |
| 69 | #define OL_TX_DESC_POOL_SIZE_MAX_HL 5000 |
| 70 | #endif |
| 71 | |
| 72 | |
| 73 | #ifdef CONFIG_PER_VDEV_TX_DESC_POOL |
| 74 | #define TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK 400 |
| 75 | #define TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED 100 |
| 76 | #endif |
| 77 | |
| 78 | #ifdef CONFIG_TX_DESC_HI_PRIO_RESERVE |
| 79 | #define TXRX_HL_TX_DESC_HI_PRIO_RESERVED 20 |
| 80 | #endif |
| 81 | |
#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_hl_tdls_flag_reset() - set/clear the TDLS flag-reset state on a vdev
 * @vdev: virtual device to update
 * @flag: new value of the flag
 *
 * NOTE(review): the exact semantics live in the HL/TDLS implementation;
 * this header only declares the interface — confirm there.
 */
void
ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *vdev, bool flag);
#else

/* Stub for builds without HL TDLS support: intentionally a no-op. */
static inline void
ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *vdev, bool flag)
{
}
#endif
| 93 | |
#ifdef WDI_EVENT_ENABLE
/**
 * ol_get_pldev() - get the pktlog device handle associated with a pdev
 * @txrx_pdev: data-path pdev to read from
 *
 * NOTE(review): the concrete type behind the void * is defined by the
 * WDI/pktlog code — confirm against the definition.
 *
 * Return: opaque pktlog device pointer, or NULL
 */
void *ol_get_pldev(struct cdp_pdev *txrx_pdev);
#else
/* Stub when WDI events are disabled: there is never a pktlog device. */
static inline
void *ol_get_pldev(struct cdp_pdev *txrx_pdev)
{
	return NULL;
}
#endif
Nirav Shah | c4aa1ab | 2018-04-21 12:38:44 +0530 | [diff] [blame^] | 103 | |
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
/**
 * ol_txrx_peer_find_by_local_id() - look up a peer by its local peer id
 * @pdev: data-path pdev to search
 * @local_peer_id: local id previously assigned to the peer
 *
 * NOTE(review): unlike the _get_ref_ variant below, no debug-tracked
 * reference appears to be taken — confirm the contract in the definition.
 *
 * Return: peer handle, or NULL if no peer has @local_peer_id
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(struct cdp_pdev *pdev,
			      uint8_t local_peer_id);

/**
 * ol_txrx_peer_get_ref_by_local_id() - look up a peer by local id and
 *	take a debug-id-tracked reference on it
 * @ppdev: data-path pdev to search
 * @local_peer_id: local id previously assigned to the peer
 * @dbg_id: debug id recorded with the reference; release via
 *	ol_txrx_peer_release_ref()
 *
 * Return: peer handle with one reference held, or NULL if not found
 */
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id);
#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
| 113 | |
/**
 * ol_txrx_dump_pkt() - display the contents of a network buffer
 * @nbuf: buffer which contains data to be displayed
 * @nbuf_paddr: physical address of the buffer
 * @len: defines the size of the data to be displayed
 *
 * Return: None
 */
void
ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len);
| 123 | |
/**
 * ol_txrx_get_vdev_from_vdev_id() - map a vdev id to its data-path vdev
 * @vdev_id: id of the virtual device
 *
 * Return: vdev handle for @vdev_id — NOTE(review): NULL-on-miss is
 *	inferred from common driver usage; confirm in the definition
 */
struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);

/**
 * ol_txrx_find_peer_by_addr() - find a peer by MAC address
 * @pdev: data-path pdev to search
 * @peer_addr: MAC address of the peer
 * @peer_id: [out] filled with the peer's local id when found
 *
 * Return: opaque peer pointer, or NULL if not found — NOTE(review):
 *	whether a reference is taken is not visible here; confirm in the
 *	definition before holding the result
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *pdev,
				uint8_t *peer_addr,
				uint8_t *peer_id);

/**
 * htt_pkt_log_init() - initialize packet logging for a pdev
 * @pdev_handle: data-path pdev
 * @scn: opaque soc/hif context passed through to the pktlog layer
 */
void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *scn);

/*
 * Deferred-work and timer handlers for the peer-unmap timeout path.
 * Parameter name added to the first prototype: an unnamed parameter in
 * a declaration conveys nothing to callers (interface/ABI unchanged).
 */
void peer_unmap_timer_work_function(void *context);
void peer_unmap_timer_handler(void *data);
| 133 | |
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * ol_txrx_register_tx_flow_control() - register tx flow-control callbacks
 *	for a vdev
 * @vdev_id: id of the virtual device to register for
 * @flow_control: callback invoked to pause/resume the OS tx queues
 * @osif_fc_ctx: opaque OS-shim context passed back to @flow_control
 * @flow_control_is_pause: callback used to query whether the OS tx
 *	queues are currently paused
 *
 * Return: 0 on success — NOTE(review): confirm the error convention
 *	against the definition
 */
int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
				     ol_txrx_tx_flow_control_fp flow_control,
				     void *osif_fc_ctx,
				     ol_txrx_tx_flow_control_is_pause_fp
				     flow_control_is_pause);

/**
 * ol_txrx_deregister_tx_flow_control_cb() - undo a previous
 *	ol_txrx_register_tx_flow_control() registration
 * @vdev_id: id of the virtual device to deregister
 *
 * Return: 0 on success — confirm error convention against the definition
 */
int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id);

/**
 * ol_txrx_get_tx_resource() - check tx descriptor availability for a STA
 * @sta_id: id of the station
 * @low_watermark: descriptor threshold below which tx should pause
 * @high_watermark_offset: offset above @low_watermark at which tx may
 *	resume — NOTE(review): watermark semantics inferred from the names;
 *	confirm in the definition
 *
 * Return: true if enough tx resources are available
 */
bool ol_txrx_get_tx_resource(uint8_t sta_id,
			     unsigned int low_watermark,
			     unsigned int high_watermark_offset);

/**
 * ol_txrx_ll_set_tx_pause_q_depth() - set the tx pause queue depth
 * @vdev_id: id of the virtual device
 * @pause_q_depth: new depth of the pause queue
 *
 * Return: 0 on success — confirm error convention against the definition
 */
int ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth);
#endif
| 149 | |
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_set_desc_global_pool_size() - set the global tx descriptor pool size
 * @num_msdu_desc: number of MSDU descriptors in the global pool
 */
void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);

/**
 * ol_tx_get_total_free_desc() - get total free tx descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev);

/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Return: global pool size (pdev->num_msdu_desc)
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return pdev->num_msdu_desc;
}

/**
 * ol_txrx_register_pause_cb() - register a netif pause callback with the soc
 * @soc: data-path soc handle
 * @pause_cb: callback invoked to pause/unpause the OS netif queues
 *
 * Return: QDF_STATUS_SUCCESS on success — confirm failure codes in the
 *	definition
 */
QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
				     tx_pause_callback pause_cb);
/**
 * ol_txrx_fwd_desc_thresh_check() - check to forward packet to tx path
 * @vdev: which virtual device the frames were addressed to
 *
 * This API is to check whether enough descriptors are available or not
 * to forward packet to tx path. If not enough descriptors left,
 * start dropping tx-path packets.
 * Do not pause netif queues as still a pool of descriptors is reserved
 * for high-priority traffic such as EAPOL/ARP etc.
 * In case of intra-bss forwarding, it could be possible that tx-path can
 * consume all the tx descriptors and pause netif queues. Due to this,
 * there would be none left for stack-triggered packets such as ARP
 * packets, which could lead to disconnection of the device. To avoid
 * this, a pool of descriptors is reserved for high-priority packets,
 * i.e., the drop threshold in the intra-bss forwarding path is raised.
 *
 * Return: true ; forward the packet, i.e., below threshold
 *         false; not enough descriptors, drop the packet
 */
bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev);
#else
/**
 * ol_tx_get_desc_global_pool_size() - get global pool size
 * @pdev: pdev handle
 *
 * Without flow control v2 the pool size is the target's configured
 * tx credit.
 *
 * Return: global pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
}
Poddar, Siddarth | bd80420 | 2016-11-23 18:19:49 +0530 | [diff] [blame] | 193 | |
/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors, read from the pdev's tx descriptor
 *	freelist counter
 */
static inline
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
	return pdev->tx_desc.num_free;
}
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 205 | |
/*
 * Without flow control v2 no descriptor threshold is enforced on the
 * forwarding path, so forwarding is always allowed.
 */
static inline
bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
{
	return true;
}

#endif
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 213 | #endif /* _OL_TXRX__H_ */ |