Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
Jeff Johnson | 2338e1a | 2016-12-16 15:59:24 -0800 | [diff] [blame] | 2 | * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3 | * |
| 4 | * Previously licensed under the ISC license by Qualcomm Atheros, Inc. |
| 5 | * |
| 6 | * |
| 7 | * Permission to use, copy, modify, and/or distribute this software for |
| 8 | * any purpose with or without fee is hereby granted, provided that the |
| 9 | * above copyright notice and this permission notice appear in all |
| 10 | * copies. |
| 11 | * |
| 12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 13 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 15 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 16 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 17 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 18 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 19 | * PERFORMANCE OF THIS SOFTWARE. |
| 20 | */ |
| 21 | |
| 22 | /* |
| 23 | * This file was originally distributed by Qualcomm Atheros, Inc. |
| 24 | * under proprietary terms before Copyright ownership was assigned |
| 25 | * to the Linux Foundation. |
| 26 | */ |
| 27 | |
| 28 | /** |
| 29 | * @file ol_txrx_ctrl_api.h |
| 30 | * @brief Define the host data API functions called by the host control SW. |
| 31 | */ |
| 32 | #ifndef _OL_TXRX_CTRL_API__H_ |
| 33 | #define _OL_TXRX_CTRL_API__H_ |
| 34 | |
| 35 | #include <athdefs.h> /* A_STATUS */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 36 | #include <qdf_nbuf.h> /* qdf_nbuf_t */ |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 37 | #include <qdf_types.h> /* qdf_device_t */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 38 | #include <htc_api.h> /* HTC_HANDLE */ |
| 39 | |
Dhanashri Atre | 12a0839 | 2016-02-17 13:10:34 -0800 | [diff] [blame] | 40 | #include <ol_txrx_api.h> /* ol_sec_type */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 41 | #include <wlan_defs.h> /* MAX_SPATIAL_STREAM */ |
Dhanashri Atre | 12a0839 | 2016-02-17 13:10:34 -0800 | [diff] [blame] | 42 | #include <cdp_txrx_cmn.h> /* ol_pdev_handle, ol_vdev_handle, etc */ |
Manjunathappa Prakash | 10d357a | 2016-03-31 19:20:49 -0700 | [diff] [blame] | 43 | #include <cdp_txrx_cfg.h> |
Leo Chang | 9872676 | 2016-10-28 11:07:18 -0700 | [diff] [blame] | 44 | #include <ol_defines.h> |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 45 | #include <cdp_txrx_handle.h> |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 46 | #define OL_ATH_TX_DRAIN_WAIT_DELAY 50 |
| 47 | |
/**
 * @brief Set up the data SW subsystem.
 * @details
 * As part of the WLAN device attach, the data SW subsystem has
 * to be attached as a component within the WLAN device.
 * This attach allocates and initializes the physical device object
 * used by the data SW.
 * The data SW subsystem attach needs to happen after the target has
 * been started, and host / target parameter negotiation has completed,
 * since the host data SW uses some of these host/target negotiated
 * parameters (e.g. peer ID range) during the initializations within
 * its attach function.
 * However, the host data SW is not allowed to send HTC messages to the
 * target within this pdev_attach function call, since the HTC setup
 * has not completed at this stage of initialization.  Any messaging
 * to the target has to be done in the separate pdev_attach_target call
 * that is invoked after HTC setup is complete.
 *
 * @param pdev - txrx_pdev handle
 * @return 0 for success or error code
 */
int
ol_txrx_pdev_post_attach(struct cdp_pdev *pdev);
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 71 | |
/**
 * @brief Parameter type to be input to ol_txrx_peer_update
 * @details
 * This union carries the new value for exactly one attribute of a txrx
 * peer object; which member is valid is indicated by the accompanying
 * enum ol_txrx_peer_update_select_t argument to ol_txrx_peer_update.
 */
union ol_txrx_peer_update_param_t {
	uint8_t qos_capable;		/* used with ..._update_qos_capable */
	uint8_t uapsd_mask;		/* used with ..._update_uapsdMask */
	enum ol_sec_type sec_type;	/* used with ..._update_peer_security */
};
| 83 | |
/**
 * @brief Selector to be input to ol_txrx_peer_update
 * @details
 * This enum specifies which member of union ol_txrx_peer_update_param_t
 * holds the value that should be applied to the txrx peer object.
 */
enum ol_txrx_peer_update_select_t {
	ol_txrx_peer_update_qos_capable = 1,
	ol_txrx_peer_update_uapsdMask,
	ol_txrx_peer_update_peer_security,
};
| 96 | |
/**
 * @brief Update the data peer object as some information changed in node.
 * @details
 * Only a single parameter can be changed for each call to this func.
 *
 * @param data_vdev - data virtual device the peer belongs to
 * @param peer_mac - MAC address identifying which peer object to update
 * @param param - new value to be applied to the peer object
 * @param select - specifies which parameter in *param is to be updated
 */
void
ol_txrx_peer_update(ol_txrx_vdev_handle data_vdev, uint8_t *peer_mac,
		    union ol_txrx_peer_update_param_t *param,
		    enum ol_txrx_peer_update_select_t select);
| 110 | |
#if defined(CONFIG_HL_SUPPORT)
/**
 * @brief Notify tx data SW that a peer-TID is ready to transmit to.
 * @details
 * This function applies only to HL systems - in LL systems, tx flow control
 * is handled entirely within the target FW.
 * If a peer-TID has tx paused, then the tx datapath will end up queuing
 * any tx frames that arrive from the OS shim for that peer-TID.
 * In a HL system, the host tx data SW itself will classify the tx frame,
 * and determine that it needs to be queued rather than downloaded to the
 * target for transmission.
 * Once the peer-TID is ready to accept data, the host control SW will call
 * this function to notify the host data SW that the queued frames can be
 * enabled for transmission, or specifically to download the tx frames
 * to the target to transmit.
 * The TID parameter is an extended version of the QoS TID.  Values 0-15
 * indicate a regular QoS TID, and the value 16 indicates either non-QoS
 * data, multicast data, or broadcast data.
 *
 * @param data_peer - which peer is being unpaused
 * @param tid - which TID within the peer is being unpaused, or -1 as a
 *      wildcard to unpause all TIDs within the peer
 */
void
ol_txrx_peer_tid_unpause(ol_txrx_peer_handle data_peer, int tid);
| 136 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 137 | |
/**
 * @brief Tell a paused peer to release a specified number of tx frames.
 * @details
 * This function applies only to HL systems - in LL systems, tx flow control
 * is handled entirely within the target FW.
 * Download up to a specified maximum number of tx frames from the tx
 * queues of the specified TIDs within the specified paused peer, usually
 * in response to a U-APSD trigger from the peer.
 * It is up to the host data SW to determine how to choose frames from the
 * tx queues of the specified TIDs.  However, the host data SW does need to
 * provide long-term fairness across the U-APSD enabled TIDs.
 * The host data SW will notify the target data FW when it is done
 * downloading the batch of U-APSD triggered tx frames, so the target data
 * FW can differentiate between an in-progress download versus a case when
 * there are fewer tx frames available than the specified limit.
 * This function is relevant primarily to HL U-APSD, where the frames are
 * held in the host.
 *
 * @param peer - which peer sent the U-APSD trigger
 * @param tid_mask - bitmask of U-APSD enabled TIDs from whose tx queues
 *      tx frames can be released
 * @param max_frms - limit on the number of tx frames to release from the
 *      specified TID's queues within the specified peer
 */
void ol_txrx_tx_release(ol_txrx_peer_handle peer,
			u_int32_t tid_mask,
			int max_frms);
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 165 | |
/**
 * @brief Suspend all tx data per thermal event/timer for the
 *	specified physical device
 * @details
 * This function applies only to HL systems, and pause / unpause
 * operations are expected to occur in matched pairs.
 *
 * @param data_pdev - the physical device whose tx is being throttled
 */
void
ol_txrx_throttle_pause(ol_txrx_pdev_handle data_pdev);
| 175 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 176 | |
/**
 * @brief Resume all tx data per thermal event/timer for the
 *	specified physical device
 * @details
 * This function applies only to HL systems, and pause / unpause
 * operations are expected to occur in matched pairs.
 *
 * @param data_pdev - the physical device whose tx is being un-throttled
 */
void
ol_txrx_throttle_unpause(ol_txrx_pdev_handle data_pdev);
| 186 | |
| 187 | #else |
Siddarth Poddar | b2011f6 | 2016-04-27 20:45:42 +0530 | [diff] [blame] | 188 | static inline void |
| 189 | ol_txrx_peer_tid_unpause(ol_txrx_peer_handle data_peer, int tid) |
| 190 | { |
| 191 | return; |
| 192 | } |
| 193 | |
| 194 | static inline void |
| 195 | ol_txrx_tx_release(ol_txrx_peer_handle peer, |
| 196 | u_int32_t tid_mask, |
| 197 | int max_frms) |
| 198 | { |
| 199 | return; |
| 200 | } |
| 201 | |
| 202 | static inline void |
| 203 | ol_txrx_throttle_pause(ol_txrx_pdev_handle data_pdev) |
| 204 | { |
| 205 | return; |
| 206 | } |
| 207 | |
| 208 | static inline void |
| 209 | ol_txrx_throttle_unpause(ol_txrx_pdev_handle data_pdev) |
| 210 | { |
| 211 | return; |
| 212 | } |
| 213 | |
| 214 | #endif /* CONFIG_HL_SUPPORT */ |
| 215 | |
/**
 * @brief Notify tx data SW that a peer's transmissions are suspended.
 * @details
 * This function applies only to HL systems - in LL systems, tx flow
 * control is handled entirely within the target FW.
 * The HL host tx data SW performs tx classification and tx download
 * scheduling, so it must actively participate in tx flow control:
 * it needs to know whether a given peer is available to transmit to,
 * or is paused, so it can hold back the tx frames for that peer.
 * Currently a no-op placeholder.
 *
 * @param data_peer - which peer is being paused
 */
static inline void ol_txrx_peer_pause(struct ol_txrx_peer_t *data_peer)
{
}
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 234 | |
/**
 * @brief Suspend all tx data for the specified physical device.
 * @details
 * This function applies only to HL systems - in LL systems, tx flow control
 * is handled entirely within the target FW.
 * In some systems it is necessary to be able to temporarily
 * suspend all WLAN traffic, e.g. to allow another device such as bluetooth
 * to temporarily have exclusive access to shared RF chain resources.
 * This function suspends tx traffic within the specified physical device.
 *
 * @param data_pdev - the physical device being paused
 * @param reason - pause reason code (semantics defined by the
 *      implementation - confirm against the .c file)
 */
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
	defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)

void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason);
#else
/* No host tx flow control configured: pdev pause is a no-op. */
static inline
void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason)
{
}
| 257 | #endif |
| 258 | |
/**
 * @brief Resume tx for the specified physical device.
 * @details
 * This function applies only to HL systems - in LL systems, tx flow control
 * is handled entirely within the target FW.
 *
 * @param pdev - the physical device being unpaused
 * @param reason - unpause reason code, expected to match the reason the
 *      pdev was paused with (confirm against the .c file)
 */
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
	defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)

void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason);
#else
/* No host tx flow control configured: pdev unpause is a no-op. */
static inline
void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
}
| 277 | #endif |
| 278 | |
/**
 * @brief Synchronize the data-path tx with a control-path target download
 * @details
 * @param data_pdev - the data-path physical device object
 * @param sync_cnt - after the host data-path SW downloads this sync request
 *      to the target data-path FW, the target tx data-path will hold itself
 *      in suspension until it is given an out-of-band sync counter value
 *      that is equal to or greater than this counter value
 */
void ol_txrx_tx_sync(ol_txrx_pdev_handle data_pdev, uint8_t sync_cnt);

/* Callback to notify that a vdev deletion has completed; context is the
 * opaque pointer supplied at registration.  NOTE(review): presumably
 * registered via the vdev detach path - confirm against callers.
 */
typedef void (*ol_txrx_vdev_delete_cb)(void *context);

/* Tx completion callback for specially-marked ("no free") data frames,
 * registered via ol_txrx_data_tx_cb_set; had_error reports the tx status.
 */
typedef void
(*ol_txrx_data_tx_cb)(void *ctxt, qdf_nbuf_t tx_frm, int had_error);
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 295 | |
/**
 * @brief Store a delivery notification callback for specific data frames.
 * @details
 * Through a non-std tx function, the txrx SW can be given tx data frames
 * that are specially marked to not be unmapped and freed by the tx SW
 * when transmission completes.  Rather, these specially-marked frames
 * are provided to the callback registered with this function.
 *
 * @param data_vdev - which vdev the callback is being registered with
 *      (Currently the callback is stored in the pdev rather than the vdev.)
 * @param callback - the function to call when tx frames marked as "no free"
 *      are done being transmitted
 * @param ctxt - the context argument provided to the callback function
 */
void
ol_txrx_data_tx_cb_set(struct cdp_vdev *data_vdev,
		       ol_txrx_data_tx_cb callback, void *ctxt);
| 313 | |
/**
 * @brief Discard all tx frames that are pending in txrx.
 * @details
 * Mainly used in the clean up path to make sure all pending tx packets
 * held by txrx are returned back to the OS shim immediately.
 *
 * @param pdev - the data physical device object
 * @return - void
 */
void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev);
| 324 | |
/**
 * @brief Set the safemode of the device.
 * @details
 * This flag is used to bypass the encrypt and decrypt processes when
 * sending and receiving packets.  It works like open AUTH mode: HW will
 * treat all packets as non-encrypted frames because no key is installed.
 * For rx fragmented frames, it bypasses all the rx defragmentation.
 *
 * @param vdev - the data virtual device object
 * @param val - the safemode state (non-zero enables safemode)
 * @return - void
 */
void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val);
| 338 | |
/**
 * @brief Configure the drop-unencrypted-frame flag.
 * @details
 * Rx related.  When this flag is set, all the unencrypted frames
 * received over a secure connection will be discarded.
 *
 * @param vdev - the data virtual device object
 * @param val - flag value
 * @return - void
 */
void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val);

/* Update the peer's "key installed" state.
 * NOTE(review): val is presumably a boolean 0/1 - confirm against callers.
 */
void
ol_txrx_peer_keyinstalled_state_update(ol_txrx_peer_handle data_peer,
				       uint8_t val);

/* ADDBA confirmation requires no host data-path action in this driver. */
#define ol_tx_addba_conf(data_peer, tid, status) /* no-op */
| 356 | |
/**
 * @brief Find a txrx peer handle from the peer's MAC address.
 * @details
 * The control SW typically uses the txrx peer handle to refer to the peer.
 * In unusual circumstances, if it is infeasible for the control SW to
 * maintain the txrx peer handle but it can maintain the peer's MAC address,
 * this function allows the peer handle to be retrieved, based on the peer's
 * MAC address.
 * In cases where there are multiple peer objects with the same MAC address,
 * it is undefined which such object is returned.
 * This function does not increment the peer's reference count.  Thus, it is
 * only suitable for use as long as the control SW has assurance that it has
 * not deleted the peer object, by calling ol_txrx_peer_detach.
 *
 * @param pdev - the data physical device object
 * @param peer_mac_addr - MAC address of the peer in question
 * @return handle to the txrx peer object, or NULL if not found
 */
ol_txrx_peer_handle
ol_txrx_peer_find_by_addr(ol_txrx_pdev_handle pdev, uint8_t *peer_mac_addr);
| 377 | |
/**
 * @brief Per-peer data-path statistics: tx and rx counters, broken out
 * by unicast / multicast / broadcast address type.
 */
struct ol_txrx_peer_stats_t {
	struct {
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} frms;		/* number of frames transmitted */
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} bytes;	/* number of bytes transmitted */
	} tx;
	struct {
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} frms;		/* number of frames received */
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} bytes;	/* number of bytes received */
	} rx;
};
| 404 | |
/**
 * @brief Provide a snapshot of the txrx counters for the specified peer.
 * @details
 * The txrx layer optionally maintains per-peer stats counters.
 * This function provides the caller with a consistent snapshot of the
 * txrx stats counters for the specified peer.
 *
 * @param pdev - the data physical device object
 * @param peer - which peer's stats counters are requested
 * @param stats - buffer for holding the stats counters snapshot
 * @return success / failure status
 */
#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
A_STATUS
ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
			ol_txrx_peer_handle peer,
			struct ol_txrx_peer_stats_t *stats);
#else
#define ol_txrx_peer_stats_copy(pdev, peer, stats) A_ERROR /* failure */
#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
| 424 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 425 | |
/* Sentinel returned when no valid RSSI average is available. */
#define OL_TXRX_RSSI_INVALID 0xffff
/**
 * @brief Provide the current RSSI average from data frames sent by a peer.
 * @details
 * If a peer has sent data frames, the data SW will optionally keep
 * a running average of the RSSI observed for those data frames.
 * This function returns that time-average RSSI if it is available,
 * or OL_TXRX_RSSI_INVALID if either RSSI tracking is disabled or if
 * no data frame indications with valid RSSI meta-data have been received.
 * The RSSI is in approximate dBm units, and is normalized with respect
 * to a 20 MHz channel.  For example, if a data frame is received on a
 * 40 MHz channel, wherein both the primary 20 MHz channel and the
 * secondary 20 MHz channel have an RSSI of -77 dBm, the reported RSSI
 * will be -77 dBm, rather than the actual -74 dBm RSSI from the
 * combination of the primary + extension 20 MHz channels.
 * Alternatively, the RSSI may be evaluated only on the primary 20 MHz
 * channel.
 *
 * @param peer - which peer's RSSI is desired
 * @return RSSI evaluated from frames sent by the specified peer
 */
#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer);
#else
#define ol_txrx_peer_rssi(peer) OL_TXRX_RSSI_INVALID
#endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
| 452 | |
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)

/**
 * @brief Configure the bad peer tx limit setting.
 * @details
 *
 * @param pdev - the physical device
 * @param enable - enable/disable the bad-peer tx flow control
 * @param period - NOTE(review): presumably the monitoring period -
 *      confirm against the implementation
 * @param txq_limit - tx queue limit applied to bad peers
 */
void
ol_txrx_bad_peer_txctl_set_setting(
	struct cdp_pdev *pdev,
	int enable,
	int period,
	int txq_limit);

/**
 * @brief Configure the bad peer tx threshold limit.
 * @details
 *
 * @param pdev - the physical device
 * @param level - which threshold level to update
 * @param tput_thresh - throughput threshold for this level
 * @param tx_limit - tx limit applied when the threshold is crossed
 */
void
ol_txrx_bad_peer_txctl_update_threshold(
	struct cdp_pdev *pdev,
	int level,
	int tput_thresh,
	int tx_limit);
| 481 | #else |
| 482 | |
/* Bad-peer tx flow control disabled: configuration is a no-op. */
static inline void
ol_txrx_bad_peer_txctl_set_setting(struct cdp_pdev *pdev, int enable,
				   int period, int txq_limit)
{
}
| 492 | |
/* Bad-peer tx flow control disabled: threshold update is a no-op. */
static inline void
ol_txrx_bad_peer_txctl_update_threshold(struct cdp_pdev *pdev, int level,
					int tput_thresh, int tx_limit)
{
}
| 502 | #endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */ |
| 503 | |
| 504 | |
/* Record the given peer as the pdev's OCB peer.
 * NOTE(review): presumably only one OCB peer exists per pdev - confirm.
 */
void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
			  struct ol_txrx_peer_t *peer);

/* Retrieve the pdev's OCB peer into *peer; the bool return presumably
 * indicates whether a valid OCB peer was found - confirm against the .c.
 */
bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
			  struct ol_txrx_peer_t **peer);

/* Set / query whether management frames are sent over WMI (vs. data path). */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value);
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void);
| 513 | |
/* TX FLOW Control related functions */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Reserved pool ID for the management-frame tx flow pool. */
#define TX_FLOW_MGMT_POOL_ID 0xEF

/* Size of the global management pool; 0 disables it. */
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
#define TX_FLOW_MGMT_POOL_SIZE 32
#else
#define TX_FLOW_MGMT_POOL_SIZE 0
#endif

/* Register / deregister the pdev with the tx flow control framework. */
void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev);
void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev);
/* Debug aids: dump / clear accumulated flow pool statistics. */
void ol_tx_dump_flow_pool_info(void);
void ol_tx_clear_flow_pool_stats(void);
/* Handlers for flow-pool map/unmap events - presumably delivered by the
 * target FW; confirm against the event dispatch code.
 */
void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
				 uint8_t flow_pool_id, uint16_t flow_pool_size);
void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
				   uint8_t flow_pool_id);
/* Allocate a tx descriptor flow pool; see ol_tx_dec_pool_ref for release. */
struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
						 uint16_t flow_pool_size);

/**
 * ol_tx_inc_pool_ref() - increment pool ref count
 * @pool: flow pool pointer
 *
 * Increments pool's ref count, used to make sure that no one is using
 * pool when it is being deleted.
 * As this function is taking pool->flow_pool_lock inside it, it should
 * always be called outside this spinlock.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool);

/**
 * ol_tx_dec_pool_ref() - decrement pool ref count
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Decrements pool's ref count and deletes the pool if ref count gets 0.
 * As this function is taking pdev->tx_desc.flow_pool_list_lock and
 * pool->flow_pool_lock inside it, it should always be called outside
 * these two spinlocks.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force);
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 561 | #else |
| 562 | |
/* Flow control V2 disabled: nothing to register. */
static inline void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
{
}
/* Flow control V2 disabled: nothing to deregister. */
static inline void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
{
}
/* Flow control V2 disabled: no pool info to dump. */
static inline void ol_tx_dump_flow_pool_info(void)
{
}
/* Flow control V2 disabled: no pool stats to clear. */
static inline void ol_tx_clear_flow_pool_stats(void)
{
}
/* Flow control V2 disabled: ignore pool map events. */
static inline void ol_tx_flow_pool_map_handler(uint8_t flow_id,
					       uint8_t flow_type,
					       uint8_t flow_pool_id,
					       uint16_t flow_pool_size)
{
}
/* Flow control V2 disabled: ignore pool unmap events. */
static inline void ol_tx_flow_pool_unmap_handler(uint8_t flow_id,
						 uint8_t flow_type,
						 uint8_t flow_pool_id)
{
}
/* Flow control V2 disabled: pool creation always yields NULL. */
static inline struct ol_tx_flow_pool_t *
ol_tx_create_flow_pool(uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	return NULL;
}
Himanshu Agarwal | 7d367c1 | 2017-03-30 17:16:55 +0530 | [diff] [blame^] | 594 | static inline QDF_STATUS |
| 595 | ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 596 | { |
Himanshu Agarwal | 7d367c1 | 2017-03-30 17:16:55 +0530 | [diff] [blame^] | 597 | return QDF_STATUS_SUCCESS; |
| 598 | } |
| 599 | static inline QDF_STATUS |
| 600 | ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force) |
| 601 | { |
| 602 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 603 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 604 | #endif |
| 605 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 606 | #endif /* _OL_TXRX_CTRL_API__H_ */ |