Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 1 | /* |
Vivek Natarajan | 95f004f | 2019-01-10 22:15:46 +0530 | [diff] [blame] | 2 | * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 3 | * |
| 4 | * |
| 5 | * Permission to use, copy, modify, and/or distribute this software for |
| 6 | * any purpose with or without fee is hereby granted, provided that the |
| 7 | * above copyright notice and this permission notice appear in all |
| 8 | * copies. |
| 9 | * |
| 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 11 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 12 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 13 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 14 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 15 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 16 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 17 | * PERFORMANCE OF THIS SOFTWARE. |
| 18 | */ |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 19 | /** |
| 20 | * @file cdp_txrx_ops.h |
| 21 | * @brief Define the host data path converged API functions |
| 22 | * called by the host control SW and the OS interface module |
| 23 | */ |
| 24 | #ifndef _CDP_TXRX_CMN_OPS_H_ |
| 25 | #define _CDP_TXRX_CMN_OPS_H_ |
| 26 | |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 27 | #include <cdp_txrx_cmn_struct.h> |
Nandha Kishore Easwaran | fd7832e | 2016-11-20 18:22:48 +0530 | [diff] [blame] | 28 | #include <cdp_txrx_stats_struct.h> |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 29 | #include "cdp_txrx_handle.h" |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 30 | #include <cdp_txrx_mon_struct.h> |
Pramod Simha | 7f7b4aa | 2017-03-27 14:48:09 -0700 | [diff] [blame] | 31 | #include "wlan_objmgr_psoc_obj.h" |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 32 | #include <wmi_unified_api.h> |
| 33 | #include <wdi_event_api.h> |
Yun Park | fd269b5 | 2017-10-05 14:41:32 -0700 | [diff] [blame] | 34 | |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 35 | #ifdef IPA_OFFLOAD |
Yun Park | 1ba3ada | 2018-01-11 11:38:41 -0800 | [diff] [blame] | 36 | #ifdef CONFIG_IPA_WDI_UNIFIED_API |
| 37 | #include <qdf_ipa_wdi3.h> |
| 38 | #else |
Yun Park | fd269b5 | 2017-10-05 14:41:32 -0700 | [diff] [blame] | 39 | #include <qdf_ipa.h> |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 40 | #endif |
Yun Park | 1ba3ada | 2018-01-11 11:38:41 -0800 | [diff] [blame] | 41 | #endif |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 42 | |
/*
 * Bitmap values for the 'bitmap' argument of the peer_delete callbacks,
 * requesting special handling of the peer teardown.
 */
| 46 | #define CDP_PEER_DELETE_NO_SPECIAL 0 |
| 47 | #define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1 |
| 48 | |
Akshay Kosigi | 4002f76 | 2019-07-08 23:04:36 +0530 | [diff] [blame] | 49 | struct hif_opaque_softc; |
| 50 | |
/*
 * enum cdp_nac_param_cmd - neighbour-peer (NAC) filter commands.
 *
 * Mirrors the ieee80211_nac_param enum one-to-one; the numeric values
 * must stay in sync with it, which is why CDP_NAC_PARAM_ADD is pinned
 * to 1 explicitly.
 */
enum cdp_nac_param_cmd {
	/* add a neighbour peer entry; same as IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* delete a neighbour peer entry; same as IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* list neighbour peer entries; same as IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 60 | /****************************************************************************** |
| 61 | * |
| 62 | * Control Interface (A Interface) |
| 63 | * |
| 64 | *****************************************************************************/ |
| 65 | |
/**
 * struct cdp_cmn_ops - converged data-path (CDP) common operations table.
 *
 * Function-pointer dispatch table through which the host control SW and
 * the OS interface module call into the common TX/RX data path.  The
 * member order is part of the interface between the converged layer and
 * the data-path implementations that populate this table - do not
 * reorder or remove entries.
 *
 * NOTE(review): individual entries are presumably optional (left NULL
 * by implementations that do not support them) - confirm against the
 * dispatch wrappers before relying on any entry being non-NULL.
 */
struct cdp_cmn_ops {

	/* ---- soc/pdev/vdev lifecycle (attach/detach) ---- */

	QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);

	int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);

	/* create a vdev on @pdev with the given mac address, id, opmode
	 * and subtype; returns an opaque vdev handle */
	struct cdp_vdev *(*txrx_vdev_attach)
		(struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
		 uint8_t vdev_id, enum wlan_op_mode op_mode,
		 enum wlan_op_subtype subtype);

	/* tear down @vdev; @callback is invoked with @cb_context when the
	 * delete completes */
	void (*txrx_vdev_detach)
		(struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
		 void *cb_context);

	struct cdp_pdev *(*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, HTC_HANDLE htc_pdev,
		 qdf_device_t osdev, uint8_t pdev_id);

	int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);

	void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);

	void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);

	/**
	 * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
	 * @pdev: Dp pdev handle
	 * @force: Force deinit or not
	 *
	 * Return: None
	 */
	void (*txrx_pdev_deinit)(struct cdp_pdev *pdev, int force);

	/* ---- peer management ---- */

	/* create a peer on @vdev for @peer_mac_addr; returns an opaque
	 * peer handle */
	void *(*txrx_peer_create)
		(struct cdp_vdev *vdev, uint8_t *peer_mac_addr);

	void (*txrx_peer_setup)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);

	/* control-plane response for a peer delete, keyed by mac address */
	void (*txrx_cp_peer_del_response)
		(ol_txrx_soc_handle soc, struct cdp_vdev *vdev_hdl,
		 uint8_t *peer_mac_addr);

	void (*txrx_peer_teardown)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);

	/* ---- AST (address search table) entry management ---- */

	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		 uint32_t flags);

	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, uint32_t flags);

	/* look up an AST entry by mac across the whole soc; fills
	 * @ast_entry_info and returns true on a hit */
	bool (*txrx_peer_get_ast_info_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 struct cdp_ast_entry_info *ast_entry_info);

	/* as above, but the search is scoped to @pdev_id */
	bool (*txrx_peer_get_ast_info_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info);

	/* delete an AST entry by mac; @callback/@cookie report the
	 * asynchronous free */
	QDF_STATUS (*txrx_peer_ast_delete_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 txrx_ast_free_cb callback,
		 void *cookie);

	QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 txrx_ast_free_cb callback,
		 void *cookie);

	/* delete @peer; @bitmap takes the CDP_PEER_* special-handling
	 * flags defined at the top of this file */
	void (*txrx_peer_delete)(void *peer, uint32_t bitmap);

	void (*txrx_vdev_flush_peers)(struct cdp_vdev *vdev, bool unmap_only);

	QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
					    uint8_t smart_monitor);

	/* synchronous variant of peer delete: @delete_cb is called with
	 * the vdev id and the list of peer ids being removed */
	void (*txrx_peer_delete_sync)(void *peer,
				      QDF_STATUS(*delete_cb)(
						uint8_t vdev_id,
						uint32_t peerid_cnt,
						uint16_t *peerid_list),
				      uint32_t bitmap);

	/* register @unmap_resp_cb to be invoked on peer unmap responses */
	void (*txrx_peer_unmap_sync_cb_set)(struct cdp_pdev *pdev,
					    QDF_STATUS(*unmap_resp_cb)(
							uint8_t vdev_id,
							uint32_t peerid_cnt,
							uint16_t *peerid_list));

	/* ---- pdev/vdev accessors and misc control ---- */

	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);
	bool (*txrx_get_vow_config_frm_pdev)(struct cdp_pdev *pdev);

	void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev,
					       int16_t chan_noise_floor);

	void (*txrx_set_nac)(struct cdp_peer *peer);

	/**
	 * txrx_set_pdev_tx_capture() - callback to set pdev tx_capture
	 * @pdev: data path pdev handle
	 * @val: value of pdev_tx_capture
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);

	/* resolve @peer_id to its mac address, written into @peer_mac */
	void (*txrx_get_peer_mac_from_peer_id)
		(struct cdp_pdev *pdev_handle,
		 uint32_t peer_id, uint8_t *peer_mac);

	void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);

	void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);

	void (*txrx_ath_getstats)(void *pdev,
				  struct cdp_dev_stats *stats, uint8_t type);

	void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
				  u_int8_t *user_position);

	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);

	void (*txrx_if_mgmt_drain)(void *ni, int force);

	void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);

	void (*txrx_set_privacy_filters)
		(struct cdp_vdev *vdev, void *filter, uint32_t num);

	/* query a data-path config item identified by @cfg */
	uint32_t (*txrx_get_cfg)(void *soc, enum cdp_dp_cfg cfg);

	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/

	/* register the OS-shim vdev handle and its TX/RX ops with the
	 * data-path vdev */
	void (*txrx_vdev_register)(struct cdp_vdev *vdev,
				   void *osif_vdev,
				   struct ol_txrx_ops *txrx_ops);

	int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
			      qdf_nbuf_t tx_mgmt_frm, uint8_t type);

	int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
				  qdf_nbuf_t tx_mgmt_frm, uint8_t type,
				  uint8_t use_6mbps, uint16_t chanfreq);

	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */
	void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
				    ol_txrx_mgmt_tx_cb download_cb,
				    ol_txrx_mgmt_tx_cb ota_ack_cb,
				    void *ctxt);

	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);

	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */
	void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
				    ol_txrx_data_tx_cb callback, void *ctxt);

	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/

	int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
			     int max_subfrms_amsdu);

	A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
				      struct ol_txrx_stats_req *req,
				      bool per_vdev, bool response_expected);

	int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);

	void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
				  uint8_t cfg_stats_type, uint32_t cfg_val);

	void (*txrx_print_level_set)(unsigned level);

	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to
	 * struct qdf_mac_addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to struct qdf_mac_addr
	 */
	struct qdf_mac_addr *
		(*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to pdev
	 */
	struct cdp_pdev *(*txrx_get_pdev_from_vdev)
		(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *
		(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);

	/**
	 * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
	 * @pdev: pdev handle
	 *
	 * Return: Handle to vdev
	 */
	struct cdp_vdev *
		(*txrx_get_mon_vdev_from_pdev)(struct cdp_pdev *pdev);

	struct cdp_vdev *
		(*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
					      uint8_t vdev_id);

	void (*txrx_soc_detach)(void *soc);

	/**
	 * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 *
	 * Return: None
	 */
	void (*txrx_soc_deinit)(void *soc);

	/**
	 * txrx_soc_init() - Initialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 * @ctrl_psoc: Opaque object-manager psoc handle
	 * @hif_handle: Opaque hif handle
	 * @htc_handle: Opaque htc handle
	 * @qdf_osdev: QDF device
	 * @ol_ops: Offload ops table
	 * @device_id: Device id
	 *
	 * Return: opaque soc handle
	 */
	void *(*txrx_soc_init)(void *soc,
			       struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			       struct hif_opaque_softc *hif_handle,
			       HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			       struct ol_if_ops *ol_ops, uint16_t device_id);

	/**
	 * txrx_tso_soc_attach() - TSO attach handler triggered during
	 * dynamic tso activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_attach)(void *soc);

	/**
	 * txrx_tso_soc_detach() - TSO detach handler triggered during
	 * dynamic tso de-activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_detach)(void *soc);

	/* ---- block-ack (ADDBA/DELBA) session handling ---- */

	int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid,
					int status);

	int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
				    uint16_t tid, uint16_t batimeout,
				    uint16_t buffersize,
				    uint16_t startseqnum);

	void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
				    uint8_t *dialogtoken, uint16_t *statuscode,
				    uint16_t *buffersize, uint16_t *batimeout);

	int (*delba_process)(void *peer_handle,
			     int tid, uint16_t reasoncode);

	/**
	 * delba_tx_completion() - Indicate delba tx status
	 * @peer_handle: Peer handle
	 * @tid: Tid number
	 * @status: Tx completion status
	 *
	 * Return: 0 on Success, 1 on failure
	 */
	int (*delba_tx_completion)(void *peer_handle,
				   uint8_t tid, int status);

	void (*set_addba_response)(void *peer_handle,
				   uint8_t tid, uint16_t statuscode);

	uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
					    uint16_t peer_id,
					    uint8_t *mac_addr);

	/* ---- DSCP/TID mapping and stats ---- */

	void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
				      uint8_t map_id);
	int (*txrx_get_total_per)(struct cdp_pdev *pdev_handle);

	void (*flush_cache_rx_queue)(void);
	void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
				      uint8_t tos, uint8_t tid);
	void (*hmmc_tid_override_en)(struct cdp_pdev *pdev, bool val);
	void (*set_hmmc_tid_val)(struct cdp_pdev *pdev, uint8_t tid);

	QDF_STATUS(*txrx_stats_request)(struct cdp_soc_t *soc_handle,
					uint8_t vdev_id,
					struct cdp_txrx_stats_req *req);

	QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
				    enum qdf_stats_verbosity_level level);

	/* ---- NSS offload config, interrupts, security ---- */

	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);

	int(*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_intr_attach)(void *soc);
	void (*txrx_intr_detach)(void *soc);
	/* install the rx packet-number check parameters for @peer_handle */
	void (*set_pn_check)(struct cdp_vdev *vdev,
		struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
		uint32_t *rx_pn);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
					       struct cdp_config_params *params);

	/* ---- opaque per-pdev / per-soc client handles ---- */

	void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
	void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
				   void *dp_txrx_hdl);

	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
				       void *dp_txrx_handle);

	void (*map_pdev_to_lmac)(struct cdp_pdev *pdev_hdl,
				 uint32_t lmac_id);

	void (*set_pdev_status_down)(struct cdp_pdev *pdev_hdl,
				     bool is_pdev_down);

	/* ---- AST table reset/flush ---- */

	void (*txrx_peer_reset_ast)
		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
		 uint8_t *peer_macaddr, void *vdev_hdl);

	void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
					  void *vdev_hdl);

	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);

	/* per-AC block-ack aging timeout, in the units used by @value's
	 * callers (NOTE(review): units not visible here - confirm) */
	void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
					  uint8_t ac, uint32_t value);
	void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
					  uint8_t ac, uint32_t *value);

	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
					   uint32_t num_peers,
					   uint32_t max_ast_index,
					   bool peer_map_unmap_v2);

	/* fast-path transmit entry point */
	ol_txrx_tx_fp tx_send;

	/**
	 * txrx_get_os_rx_handles_from_vdev() - Return function, osif vdev
	 * to deliver pkt to stack.
	 * @vdev: vdev handle
	 * @stack_fn: pointer to - function pointer to deliver RX pkt to stack
	 * @osif_vdev: pointer to - osif vdev to deliver RX packet to.
	 */
	void (*txrx_get_os_rx_handles_from_vdev)
					(struct cdp_vdev *vdev,
					 ol_txrx_rx_fp *stack_fn,
					 ol_osif_vdev_handle *osif_vdev);
	int (*txrx_classify_update)
		(struct cdp_vdev *vdev, qdf_nbuf_t skb,
		 enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);

	bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
				    enum cdp_capabilities dp_caps);

	/* ---- rate-stats context and PCP/TID mapping ---- */

	void (*set_rate_stats_ctx)(struct cdp_soc_t *soc, void *ctx);
	void* (*get_rate_stats_ctx)(struct cdp_soc_t *soc);
	void (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
					   struct cdp_pdev *pdev,
					   void *buf);
	void (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
					      struct cdp_pdev *pdev);
	QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_pdev *pdev,
					   uint8_t pcp, uint8_t tid);
	QDF_STATUS (*set_pdev_tidmap_prty)(struct cdp_pdev *pdev, uint8_t prty);
	QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_vdev *vdev,
					   uint8_t pcp, uint8_t tid);
	QDF_STATUS (*set_vdev_tidmap_prty)(struct cdp_vdev *vdev, uint8_t prty);
	QDF_STATUS (*set_vdev_tidmap_tbl_id)(struct cdp_vdev *vdev,
					     uint8_t mapid);
#ifdef QCA_MULTIPASS_SUPPORT
	/* associate @vlan_id with @group_key for multipass VLAN support */
	QDF_STATUS (*set_vlan_groupkey)(struct cdp_vdev *vdev_handle,
					uint16_t vlan_id, uint16_t group_key);
#endif
};
| 476 | |
struct cdp_ctrl_ops {

	/* Attach control-path memory pools for the given control pdev */
	int
		(*txrx_mempools_attach)(void *ctrl_pdev);

	/* Neighbour-peer rx filtering controls; exact cmd/val semantics
	 * are implementation specific — NOTE(review): confirm against the
	 * dp layer implementation.
	 */
	int
		(*txrx_set_filter_neighbour_peers)(
				struct cdp_pdev *pdev,
				uint32_t val);
	int
		(*txrx_update_filter_neighbour_peers)(
				struct cdp_vdev *vdev,
				uint32_t cmd, uint8_t *macaddr);
	/**
	 * @brief set the safemode of the device
	 * @details
	 * This flag is used to bypass the encrypt and decrypt processes when
	 * send and receive packets. It works like open AUTH mode, HW will
	 * create all packets as non-encrypt frames because no key installed.
	 * For rx fragmented frames, it bypasses all the rx defragmentation.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - the safemode state
	 * @return - void
	 */

	void
		(*txrx_set_safemode)(
			struct cdp_vdev *vdev,
			u_int32_t val);
	/**
	 * @brief configure the drop unencrypted frame flag
	 * @details
	 * Rx related. When set this flag, all the unencrypted frames
	 * received over a secure connection will be discarded
	 *
	 * @param vdev - the data virtual device object
	 * @param val - flag
	 * @return - void
	 */
	void
		(*txrx_set_drop_unenc)(
			struct cdp_vdev *vdev,
			u_int32_t val);


	/**
	 * @brief set the Tx encapsulation type of the VDEV
	 * @details
	 * This will be used to populate the HTT desc packet type field
	 * during Tx
	 * @param vdev - the data virtual device object
	 * @param val - the Tx encap type
	 * @return - void
	 */
	void
		(*txrx_set_tx_encap_type)(
			struct cdp_vdev *vdev,
			enum htt_cmn_pkt_type val);
	/**
	 * @brief set the Rx decapsulation type of the VDEV
	 * @details
	 * This will be used to configure into firmware and hardware
	 * which format to decap all Rx packets into, for all peers under
	 * the VDEV.
	 * @param vdev - the data virtual device object
	 * @param val - the Rx decap mode
	 * @return - void
	 */
	void
		(*txrx_set_vdev_rx_decap_type)(
			struct cdp_vdev *vdev,
			enum htt_cmn_pkt_type val);

	/**
	 * @brief get the Rx decapsulation type of the VDEV
	 *
	 * @param vdev - the data virtual device object
	 * @return - the Rx decap type
	 */
	enum htt_cmn_pkt_type
		(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);

	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within rate-control
	 * context of the peer at association.
	 *
	 * @param peer - pointer to the node's object
	 * @param authorize - either to authorize or unauthorize peer
	 *
	 * @return none
	 */
	void
		(*txrx_peer_authorize)(struct cdp_peer *peer,
				u_int32_t authorize);

	/* Should be ol_txrx_ctrl_api.h */
	/* Enable/disable mesh mode on the vdev */
	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);

	/**
	 * @brief setting mesh rx filter
	 * @details
	 * based on the bits enabled in the filter packets has to be dropped.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - value to set
	 */
	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);

	/* Flush any tx frames buffered for this vdev */
	void (*tx_flush_buffers)(struct cdp_vdev *vdev);

	/* Non-zero if the underlying target is an AR900B part */
	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);

	/* Generic vdev parameter set (see enum cdp_vdev_param_type) */
	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
			enum cdp_vdev_param_type param, uint32_t val);

	/* Enable/disable NAWDS handling for the peer */
	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @param reo_dest_ring_num - value ranges between 1 - 4
	 */
	void (*txrx_set_pdev_reo_dest)(
		struct cdp_pdev *pdev,
		enum cdp_host_reo_dest_ring reo_dest_ring_num);

	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
		struct cdp_pdev *pdev);

	/* WDI event subscribe/unsubscribe for the pdev; event_cb_sub is
	 * an opaque subscription object — NOTE(review): confirm the
	 * expected concrete type (wdi_event_subscribe?).
	 */
	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
			uint32_t event);

	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
			uint32_t event);
	/* Security type configured for the peer at index sec_idx —
	 * presumably tx vs rx; verify against callers.
	 */
	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);

	/* Update tx power used for mgmt frames of @subtype on the vdev */
	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
			uint8_t subtype, uint8_t tx_power);

	/**
	 * txrx_set_pdev_param() - callback to set a pdev parameter
	 * @pdev: data path pdev handle
	 * @type: which parameter to set (enum cdp_pdev_param_type)
	 * @val: new value for the parameter
	 *
	 * Return: QDF_STATUS
	 */
	QDF_STATUS (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
					  enum cdp_pdev_param_type type,
					  uint32_t val);
	/* Opaque "pldev" (packet log device?) handle attached to the
	 * pdev — NOTE(review): confirm the returned object type.
	 */
	void * (*txrx_get_pldev)(struct cdp_pdev *pdev);

#ifdef ATH_SUPPORT_NAC_RSSI
	/* Configure neighbour (NAC) RSSI monitoring on the vdev and
	 * query the last measured RSSI for a neighbour MAC.
	 */
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
		uint8_t chan_num);
	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev,
						   char *macaddr,
						   uint8_t *rssi);
#endif
	/* Install key material for the peer (unicast or group key) */
	void (*set_key)(struct cdp_peer *peer_handle,
			bool is_unicast, uint32_t *key);

	/* Generic vdev parameter get (see enum cdp_vdev_param_type) */
	uint32_t (*txrx_get_vdev_param)(struct cdp_vdev *vdev,
					enum cdp_vdev_param_type param);
	/* Enable/disable (@enb_dsb) peer-based pktlog for @macaddr */
	int (*enable_peer_based_pktlog)(struct cdp_pdev
			*txrx_pdev_handle, char *macaddr, uint8_t enb_dsb);

	/* Compute/record delay stats for the given nbuf on this vdev */
	void (*calculate_delay_stats)(struct cdp_vdev *vdev, qdf_nbuf_t nbuf);
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	/* Associate @tag with @protocol_type (selected through
	 * @protocol_mask) for rx protocol tagging on this pdev.
	 */
	QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
			struct cdp_pdev *txrx_pdev_handle,
			uint32_t protocol_mask, uint16_t protocol_type,
			uint16_t tag);
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	/* Dump per-protocol rx tag counters for @protocol_type */
	void (*txrx_dump_pdev_rx_protocol_tag_stats)(
			struct cdp_pdev *txrx_pdev_handle,
			uint16_t protocol_type);
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	/* Program rx flow tagging for the flow described by @flow_info,
	 * and dump the corresponding flow tag statistics.
	 */
	QDF_STATUS (*txrx_set_rx_flow_tag)(
		struct cdp_pdev *txrx_pdev_handle,
		struct cdp_rx_flow_info *flow_info);
	QDF_STATUS (*txrx_dump_rx_flow_tag_stats)(
		struct cdp_pdev *txrx_pdev_handle,
		struct cdp_rx_flow_info *flow_info);
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	/* Associate a VLAN id with the peer for multipass handling */
	void (*txrx_peer_set_vlan_id)(ol_txrx_soc_handle soc,
				      struct cdp_vdev *vdev, uint8_t *peer_mac,
				      uint16_t vlan_id);
#endif
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	/* Per-peer enable/disable of enhanced tx/rx packet capture */
	QDF_STATUS (*txrx_update_peer_pkt_capture_params)(
			struct cdp_pdev *txrx_pdev_handle,
			bool is_rx_pkt_cap_enable, bool is_tx_pkt_cap_enable,
			uint8_t *peer_mac);
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
};
| 694 | |
/**
 * struct cdp_me_ops - multicast enhancement (mcast-to-ucast) ops
 *
 * Tx descriptor pool management for multicast-clone frames plus the
 * conversion of a multicast frame into per-client unicast copies.
 */
struct cdp_me_ops {

	/* Allocate @buf_count tx descriptors and mark them reserved for
	 * mcast-clone use; returns a count — NOTE(review): confirm
	 * whether it is the number allocated or remaining.
	 */
	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);

	/* Inverse of the above: unmark and free @buf_count descriptors */
	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
			struct cdp_pdev *pdev,
			u_int16_t buf_count);

	/* Number of mcast-clone buffers currently allocated and marked */
	u_int16_t
		(*tx_get_mcast_buf_allocated_marked)
			(struct cdp_pdev *pdev);
	/* Allocate/free the pdev's multicast-enhancement descriptor pool */
	void
		(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);

	void
		(*tx_me_free_descriptor)(struct cdp_pdev *pdev);

	/* Convert the multicast frame @wbuf into unicast copies for the
	 * @newmaccnt destination MACs in @newmac; uint16_t result —
	 * NOTE(review): confirm return value meaning (frames queued?).
	 */
	uint16_t
		(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
			qdf_nbuf_t wbuf, u_int8_t newmac[][6],
			uint8_t newmaccnt);
	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify mcast frame indication from FW.
	 * @details
	 * This notification will be used to convert
	 * multicast frame to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspect
	 */

	int (*mcast_notify)(struct cdp_pdev *pdev,
			u_int8_t vdev_id, qdf_nbuf_t msdu);
};
| 732 | |
/**
 * struct cdp_mon_ops - monitor mode configuration ops
 *
 * Note the handle asymmetry kept for API compatibility: the filter
 * setters take a pdev handle while the getters take a vdev handle.
 */
struct cdp_mon_ops {

	/* Enable (non-zero val) or disable capture of unicast data,
	 * multicast data, and non-data frames respectively.
	 */
	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);

	/* Query the corresponding filter states */
	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);
	/* Reset (disable) monitor mode configuration on the pdev */
	QDF_STATUS (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);

	/* HK advance monitor filter support */
	QDF_STATUS (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);

	/* Record the operating channel for monitor-path reporting —
	 * NOTE(review): confirm how @val (channel number?) is consumed.
	 */
	void (*txrx_monitor_record_channel)
		(struct cdp_pdev *, int val);

	/* Deliver a tx mgmt frame (nbuf) to the monitor path */
	void (*txrx_deliver_tx_mgmt)
		(struct cdp_pdev *pdev, qdf_nbuf_t nbuf);
};
| 760 | |
Alok Kumar | 3d15ae8 | 2019-08-15 20:56:40 +0530 | [diff] [blame] | 761 | #ifdef WLAN_FEATURE_PKT_CAPTURE |
| 762 | struct cdp_pktcapture_ops { |
| 763 | void (*txrx_pktcapture_set_mode) |
| 764 | (struct cdp_soc_t *soc, |
| 765 | uint8_t pdev_id, |
| 766 | uint8_t mode); |
| 767 | |
| 768 | uint8_t (*txrx_pktcapture_get_mode) |
| 769 | (struct cdp_soc_t *soc, |
| 770 | uint8_t pdev_id); |
| 771 | |
| 772 | QDF_STATUS (*txrx_pktcapture_cb_register) |
| 773 | (struct cdp_soc_t *soc, |
| 774 | uint8_t pdev_id, |
| 775 | void *context, |
| 776 | QDF_STATUS(cb)(void *, qdf_nbuf_t)); |
| 777 | |
| 778 | QDF_STATUS (*txrx_pktcapture_cb_deregister) |
| 779 | (struct cdp_soc_t *soc, |
| 780 | uint8_t pdev_id); |
| 781 | |
| 782 | QDF_STATUS (*txrx_pktcapture_mgmtpkt_process) |
| 783 | (struct cdp_soc_t *soc, |
| 784 | uint8_t pdev_id, |
| 785 | struct mon_rx_status *txrx_status, |
| 786 | qdf_nbuf_t nbuf, uint8_t status); |
| 787 | |
| 788 | void (*txrx_pktcapture_record_channel) |
| 789 | (struct cdp_soc_t *soc, |
| 790 | uint8_t pdev_id, |
| 791 | int chan_no); |
| 792 | }; |
| 793 | #endif /* #ifdef WLAN_FEATURE_PKT_CAPTURE */ |
| 794 | |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 795 | struct cdp_host_stats_ops { |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 796 | int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id, |
| 797 | struct ol_txrx_stats_req *req); |
Ishank Jain | 6290a3c | 2017-03-21 10:49:39 +0530 | [diff] [blame] | 798 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 799 | QDF_STATUS (*txrx_host_stats_clr)(struct cdp_soc_t *soc, |
| 800 | uint8_t vdev_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 801 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 802 | QDF_STATUS |
| 803 | (*txrx_host_ce_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 804 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 805 | int (*txrx_stats_publish)(struct cdp_soc_t *soc, uint8_t pdev_id, |
| 806 | struct cdp_stats_extd *buf); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 807 | /** |
| 808 | * @brief Enable enhanced stats functionality. |
| 809 | * |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 810 | * @param soc - the soc handle |
| 811 | * @param pdev_id - pdev_id of pdev |
| 812 | * @return - QDF_STATUS |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 813 | */ |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 814 | QDF_STATUS (*txrx_enable_enhanced_stats)(struct cdp_soc_t *soc, |
| 815 | uint8_t pdev_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 816 | |
| 817 | /** |
| 818 | * @brief Disable enhanced stats functionality. |
| 819 | * |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 820 | * @param soc - the soc handle |
| 821 | * @param pdev_id - pdev_id of pdev |
| 822 | * @return - QDF_STATUS |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 823 | */ |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 824 | QDF_STATUS (*txrx_disable_enhanced_stats)(struct cdp_soc_t *soc, |
| 825 | uint8_t pdev_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 826 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 827 | QDF_STATUS |
| 828 | (*tx_print_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 829 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 830 | QDF_STATUS |
| 831 | (*tx_rst_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 832 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 833 | QDF_STATUS |
| 834 | (*tx_print_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 835 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 836 | QDF_STATUS |
| 837 | (*tx_rst_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 838 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 839 | QDF_STATUS |
| 840 | (*print_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 841 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 842 | QDF_STATUS |
| 843 | (*rst_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 844 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 845 | QDF_STATUS |
| 846 | (*txrx_host_me_stats)(struct cdp_soc_t *soc, uint8_t vdev_id); |
Venkata Sharath Chandra Manchala | 0cb3198 | 2018-03-30 15:55:26 -0700 | [diff] [blame] | 847 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 848 | QDF_STATUS |
| 849 | (*txrx_per_peer_stats)(struct cdp_soc_t *soc, uint8_t *addr); |
Venkata Sharath Chandra Manchala | 0cb3198 | 2018-03-30 15:55:26 -0700 | [diff] [blame] | 850 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 851 | int (*txrx_host_msdu_ttl_stats)(struct cdp_soc_t *soc, uint8_t vdev_id, |
| 852 | struct ol_txrx_stats_req *req); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 853 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 854 | int (*ol_txrx_update_peer_stats)(struct cdp_soc_t *soc, |
| 855 | uint8_t pdev_id, |
| 856 | uint8_t *addr, void *stats, |
| 857 | uint32_t last_tx_rate_mcs, |
| 858 | uint32_t stats_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 859 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 860 | QDF_STATUS |
| 861 | (*get_fw_peer_stats)(struct cdp_soc_t *soc, uint8_t pdev_id, |
| 862 | uint8_t *addr, |
| 863 | uint32_t cap, uint32_t copy_stats); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 864 | |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 865 | QDF_STATUS |
| 866 | (*get_htt_stats)(struct cdp_soc_t *soc, uint8_t pdev_id, |
| 867 | void *data, |
| 868 | uint32_t data_len); |
| 869 | QDF_STATUS |
| 870 | (*txrx_update_pdev_stats)(struct cdp_soc_t *soc, |
| 871 | uint8_t pdev_id, void *data, |
Amir Patel | 253053f | 2018-07-17 00:20:57 +0530 | [diff] [blame] | 872 | uint16_t stats_id); |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 873 | QDF_STATUS |
| 874 | (*txrx_get_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id, |
| 875 | uint8_t *peer_mac, |
| 876 | struct cdp_peer_stats *peer_stats); |
| 877 | QDF_STATUS |
| 878 | (*txrx_reset_peer_ald_stats)(struct cdp_soc_t *soc, |
| 879 | uint8_t vdev_id, |
| 880 | uint8_t *peer_mac); |
| 881 | QDF_STATUS |
| 882 | (*txrx_reset_peer_stats)(struct cdp_soc_t *soc, |
| 883 | uint8_t vdev_id, uint8_t *peer_mac); |
Amir Patel | 253053f | 2018-07-17 00:20:57 +0530 | [diff] [blame] | 884 | int |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 885 | (*txrx_get_vdev_stats)(struct cdp_soc_t *soc, uint8_t vdev_id, |
| 886 | void *buf, bool is_aggregate); |
Amir Patel | 253053f | 2018-07-17 00:20:57 +0530 | [diff] [blame] | 887 | int |
| 888 | (*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc, |
| 889 | void *data, uint32_t len, |
| 890 | uint32_t stats_id); |
| 891 | int |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 892 | (*txrx_get_vdev_extd_stats)(struct cdp_soc_t *soc, |
| 893 | uint8_t vdev_id, |
| 894 | wmi_host_vdev_extd_stats *buffer); |
| 895 | QDF_STATUS |
| 896 | (*txrx_update_vdev_stats)(struct cdp_soc_t *soc, |
| 897 | uint8_t vdev_id, void *buf, |
Debasis Das | c246791 | 2018-09-10 20:27:07 +0530 | [diff] [blame] | 898 | uint16_t stats_id); |
Amir Patel | 756d05e | 2018-10-10 12:35:30 +0530 | [diff] [blame] | 899 | int |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 900 | (*txrx_get_radio_stats)(struct cdp_soc_t *soc, uint8_t pdev_id, |
Amir Patel | 756d05e | 2018-10-10 12:35:30 +0530 | [diff] [blame] | 901 | void *buf); |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 902 | QDF_STATUS |
| 903 | (*txrx_get_pdev_stats)(struct cdp_soc_t *soc, uint8_t pdev_id, |
| 904 | struct cdp_pdev_stats *buf); |
Surya Prakash Raajen | 3a01bdd | 2019-02-19 13:19:36 +0530 | [diff] [blame] | 905 | int |
| 906 | (*txrx_get_ratekbps)(int preamb, int mcs, |
| 907 | int htflag, int gintval); |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 908 | |
| 909 | QDF_STATUS |
| 910 | (*configure_rate_stats)(struct cdp_soc_t *soc, uint8_t val); |
| 911 | |
| 912 | QDF_STATUS |
| 913 | (*txrx_update_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id, |
| 914 | uint8_t *peer_mac, void *stats, |
| 915 | uint32_t last_tx_rate_mcs, |
| 916 | uint32_t stats_id); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 917 | }; |
| 918 | |
/**
 * struct cdp_wds_ops - WDS (wireless distribution system) ops
 */
struct cdp_wds_ops {
	/* Set the WDS rx policy value for the vdev */
	QDF_STATUS
	(*txrx_set_wds_rx_policy)(struct cdp_soc_t *soc, uint8_t vdev_id,
				  u_int32_t val);
	/* Update the peer's WDS tx policy for unicast and multicast */
	QDF_STATUS
	(*txrx_wds_peer_tx_policy_update)(struct cdp_soc_t *soc,
					  uint8_t vdev_id, uint8_t *peer_mac,
					  int wds_tx_ucast, int wds_tx_mcast);
	/* Enable/disable (@val) WDS on the vdev */
	int (*vdev_set_wds)(struct cdp_soc_t *soc, uint8_t vdev_id,
			    uint32_t val);
};
| 930 | |
/**
 * struct cdp_raw_ops - raw frame mode ops
 */
struct cdp_raw_ops {
	/* Return the native-wifi mode configured on the vdev */
	int (*txrx_get_nwifi_mode)(struct cdp_soc_t *soc, uint8_t vdev_id);

	/* Fetch the AST entry for the frame in *pnbuf (raw-mode
	 * simulation) — NOTE(review): confirm whether the callee may
	 * replace the nbuf through @pnbuf.
	 */
	QDF_STATUS
	(*rsim_get_astentry)(struct cdp_soc_t *soc, uint8_t vdev_id,
			     qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast);
};
| 938 | |
Pranita Solanke | 0586296 | 2019-01-09 11:39:29 +0530 | [diff] [blame] | 939 | #ifdef PEER_FLOW_CONTROL |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 940 | struct cdp_pflow_ops { |
Pavankumar Nandeshwar | e54c584 | 2019-09-29 16:01:09 +0530 | [diff] [blame] | 941 | uint32_t (*pflow_update_pdev_params)(struct cdp_soc_t *soc, |
| 942 | uint8_t pdev_id, |
| 943 | enum _ol_ath_param_t, |
| 944 | uint32_t, void *); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 945 | }; |
Pranita Solanke | 0586296 | 2019-01-09 11:39:29 +0530 | [diff] [blame] | 946 | #endif /* PEER_FLOW_CONTROL */ |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 947 | |
Dhanashri Atre | 1404917 | 2016-11-11 18:32:36 -0800 | [diff] [blame] | 948 | #define LRO_IPV4_SEED_ARR_SZ 5 |
| 949 | #define LRO_IPV6_SEED_ARR_SZ 11 |
| 950 | |
| 951 | /** |
Manjunathappa Prakash | 56023f5 | 2018-03-28 20:05:56 -0700 | [diff] [blame] | 952 | * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters |
| 953 | * @lro_enable: indicates whether rx_offld is enabled |
Dhanashri Atre | 1404917 | 2016-11-11 18:32:36 -0800 | [diff] [blame] | 954 | * @tcp_flag: If the TCP flags from the packet do not match |
| 955 | * the values in this field after masking with TCP flags mask |
Manjunathappa Prakash | 56023f5 | 2018-03-28 20:05:56 -0700 | [diff] [blame] | 956 | * below, packet is not rx_offld eligible |
Dhanashri Atre | 1404917 | 2016-11-11 18:32:36 -0800 | [diff] [blame] | 957 | * @tcp_flag_mask: field for comparing the TCP values provided |
| 958 | * above with the TCP flags field in the received packet |
| 959 | * @toeplitz_hash_ipv4: contains seed needed to compute the flow id |
| 960 | * 5-tuple toeplitz hash for ipv4 packets |
| 961 | * @toeplitz_hash_ipv6: contains seed needed to compute the flow id |
| 962 | * 5-tuple toeplitz hash for ipv6 packets |
| 963 | */ |
| 964 | struct cdp_lro_hash_config { |
| 965 | uint32_t lro_enable; |
| 966 | uint32_t tcp_flag:9, |
| 967 | tcp_flag_mask:9; |
| 968 | uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ]; |
| 969 | uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ]; |
| 970 | }; |
| 971 | |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 972 | struct ol_if_ops { |
Akshay Kosigi | 0e7fdae | 2018-05-17 12:16:57 +0530 | [diff] [blame] | 973 | void |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 974 | (*peer_set_default_routing)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, |
| 975 | uint8_t pdev_id, uint8_t *peer_macaddr, |
| 976 | uint8_t vdev_id, |
Akshay Kosigi | 0e7fdae | 2018-05-17 12:16:57 +0530 | [diff] [blame] | 977 | bool hash_based, uint8_t ring_num); |
| 978 | QDF_STATUS |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 979 | (*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, |
| 980 | uint8_t pdev_id, |
Akshay Kosigi | 0e7fdae | 2018-05-17 12:16:57 +0530 | [diff] [blame] | 981 | uint8_t vdev_id, uint8_t *peer_mac, |
| 982 | qdf_dma_addr_t hw_qdesc, int tid, |
Gyanranjan Hazarika | 7f9c050 | 2018-07-25 23:26:16 -0700 | [diff] [blame] | 983 | uint16_t queue_num, |
| 984 | uint8_t ba_window_size_valid, |
| 985 | uint16_t ba_window_size); |
Akshay Kosigi | 0e7fdae | 2018-05-17 12:16:57 +0530 | [diff] [blame] | 986 | QDF_STATUS |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 987 | (*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, |
| 988 | uint8_t pdev_id, |
Akshay Kosigi | 0e7fdae | 2018-05-17 12:16:57 +0530 | [diff] [blame] | 989 | uint8_t vdev_id, uint8_t *peer_macaddr, |
| 990 | uint32_t tid_mask); |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 991 | int (*peer_unref_delete)(struct cdp_ctrl_objmgr_psoc *psoc, |
| 992 | uint8_t pdev_id, |
| 993 | uint8_t *peer_mac, |
Pavankumar Nandeshwar | 715fdc3 | 2019-10-03 20:51:01 +0530 | [diff] [blame] | 994 | uint8_t *vdev_mac, enum wlan_op_mode opmode); |
Pramod Simha | 7f7b4aa | 2017-03-27 14:48:09 -0700 | [diff] [blame] | 995 | bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc); |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 996 | int (*peer_add_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc, |
| 997 | uint8_t vdev_id, |
| 998 | uint8_t *peer_macaddr, |
syed touqeer pasha | 0050ec9 | 2018-10-14 19:36:15 +0530 | [diff] [blame] | 999 | const uint8_t *dest_macaddr, |
| 1000 | uint8_t *next_node_mac, |
| 1001 | uint32_t flags); |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1002 | int (*peer_update_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc, |
| 1003 | uint8_t vdev_id, |
| 1004 | uint8_t *dest_macaddr, |
| 1005 | uint8_t *peer_macaddr, |
| 1006 | uint32_t flags); |
| 1007 | void (*peer_del_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc, |
| 1008 | uint8_t vdev_id, |
Chaithanya Garrepalli | 267ae0e | 2019-02-19 23:45:12 +0530 | [diff] [blame] | 1009 | uint8_t *wds_macaddr, |
| 1010 | uint8_t type); |
Akshay Kosigi | 0e7fdae | 2018-05-17 12:16:57 +0530 | [diff] [blame] | 1011 | QDF_STATUS |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1012 | (*lro_hash_config)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id, |
Akshay Kosigi | 0e7fdae | 2018-05-17 12:16:57 +0530 | [diff] [blame] | 1013 | struct cdp_lro_hash_config *rx_offld_hash); |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1014 | |
Ishank Jain | 1e7401c | 2017-02-17 15:38:39 +0530 | [diff] [blame] | 1015 | void (*update_dp_stats)(void *soc, void *stats, uint16_t id, |
| 1016 | uint8_t type); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1017 | #ifdef FEATURE_NAC_RSSI |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1018 | uint8_t (*rx_invalid_peer)(struct cdp_ctrl_objmgr_psoc *soc, |
| 1019 | uint8_t pdev_id, void *msg); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1020 | #else |
| 1021 | uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh); |
Jinwei Chen | 4673310 | 2018-08-20 15:42:08 +0800 | [diff] [blame] | 1022 | #endif |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1023 | |
| 1024 | int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *psoc, |
Akshay Kosigi | eec6db9 | 2019-07-02 14:25:54 +0530 | [diff] [blame] | 1025 | uint16_t peer_id, uint16_t hw_peer_id, |
| 1026 | uint8_t vdev_id, uint8_t *peer_mac_addr, |
| 1027 | enum cdp_txrx_ast_entry_type peer_type, |
| 1028 | uint32_t tx_ast_hashidx); |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1029 | int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *psoc, |
Akshay Kosigi | eec6db9 | 2019-07-02 14:25:54 +0530 | [diff] [blame] | 1030 | uint16_t peer_id, |
Subhranil Choudhury | 9bcfecf | 2019-02-28 13:41:45 +0530 | [diff] [blame] | 1031 | uint8_t vdev_id); |
Bharat Kumar M | 9a5d537 | 2017-05-08 17:41:42 +0530 | [diff] [blame] | 1032 | |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1033 | int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *psoc, |
Akshay Kosigi | eec6db9 | 2019-07-02 14:25:54 +0530 | [diff] [blame] | 1034 | enum cdp_cfg_param_type param_num); |
Bharat Kumar M | 9a5d537 | 2017-05-08 17:41:42 +0530 | [diff] [blame] | 1035 | |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1036 | void (*rx_mic_error)(struct cdp_ctrl_objmgr_psoc *psoc, |
| 1037 | uint8_t pdev_id, |
Rakshith Suresh Patkar | d863f8d | 2019-07-16 16:30:59 +0530 | [diff] [blame] | 1038 | struct cdp_rx_mic_err_info *info); |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1039 | |
| 1040 | bool (*rx_frag_tkip_demic)(struct cdp_ctrl_objmgr_psoc *psoc, |
| 1041 | uint8_t vdev_id, uint8_t *peer_mac_addr, |
Akshay Kosigi | 78eced8 | 2018-05-14 14:53:48 +0530 | [diff] [blame] | 1042 | qdf_nbuf_t nbuf, |
Pramod Simha | 6e10cb2 | 2018-06-20 12:05:44 -0700 | [diff] [blame] | 1043 | uint16_t hdr_space); |
Gurumoorthi Gnanasambandhan | 25607a7 | 2017-08-07 11:53:16 +0530 | [diff] [blame] | 1044 | |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1045 | uint8_t (*freq_to_channel)(struct cdp_ctrl_objmgr_psoc *psoc, |
| 1046 | uint8_t vdev_id, uint16_t freq); |
| 1047 | |
Soumya Bhat | bc719e6 | 2018-02-18 18:21:25 +0530 | [diff] [blame] | 1048 | #ifdef ATH_SUPPORT_NAC_RSSI |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1049 | int (*config_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc, |
| 1050 | uint8_t pdev_id, |
| 1051 | u_int8_t vdev_id, |
| 1052 | enum cdp_nac_param_cmd cmd, char *bssid, |
| 1053 | char *client_macaddr, uint8_t chan_num); |
| 1054 | |
| 1055 | int |
| 1056 | (*config_bssid_in_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc, |
| 1057 | uint8_t pdev_id, u_int8_t vdev_id, |
| 1058 | enum cdp_nac_param_cmd cmd, |
| 1059 | char *bssid, char *client_mac); |
Soumya Bhat | bc719e6 | 2018-02-18 18:21:25 +0530 | [diff] [blame] | 1060 | #endif |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1061 | int (*peer_sta_kickout)(struct cdp_ctrl_objmgr_psoc *psoc, |
| 1062 | uint16_t pdev_id, uint8_t *peer_macaddr); |
Pamidipati, Vijay | d578db1 | 2018-04-09 23:03:12 +0530 | [diff] [blame] | 1063 | |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 1064 | /** |
| 1065 | * send_delba() - Send delba to peer |
Pavankumar Nandeshwar | 715fdc3 | 2019-10-03 20:51:01 +0530 | [diff] [blame] | 1066 | * @psoc: Objmgr soc handle |
| 1067 | * @vdev_id: dp vdev id |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 1068 | * @peer_macaddr: Peer mac addr |
| 1069 | * @tid: Tid number |
| 1070 | * |
| 1071 | * Return: 0 for success, non-zero for failure |
| 1072 | */ |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1073 | int (*send_delba)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t vdev_id, |
Pavankumar Nandeshwar | 715fdc3 | 2019-10-03 20:51:01 +0530 | [diff] [blame] | 1074 | uint8_t *peer_macaddr, uint8_t tid, |
sumedh baikady | faadbb6 | 2018-08-21 21:13:42 -0700 | [diff] [blame] | 1075 | uint8_t reason_code); |
Pavankumar Nandeshwar | 4c7b81b | 2019-09-27 11:27:12 +0530 | [diff] [blame] | 1076 | |
| 1077 | int |
| 1078 | (*peer_delete_multiple_wds_entries)(struct cdp_ctrl_objmgr_psoc *psoc, |
| 1079 | uint8_t vdev_id, |
| 1080 | uint8_t *dest_macaddr, |
| 1081 | uint8_t *peer_macaddr, |
| 1082 | uint32_t flags); |
Manjunathappa Prakash | 85de96c | 2019-05-23 17:35:12 -0700 | [diff] [blame] | 1083 | |
| 1084 | bool (*is_roam_inprogress)(uint32_t vdev_id); |
Jinwei Chen | 0f015f2 | 2019-07-18 19:47:59 +0800 | [diff] [blame] | 1085 | enum QDF_GLOBAL_MODE (*get_con_mode)(void); |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 1086 | /* TODO: Add any other control path calls required to OL_IF/WMA layer */ |
| 1087 | }; |
| 1088 | |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1089 | #ifdef DP_PEER_EXTENDED_API |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1090 | /** |
| 1091 | * struct cdp_misc_ops - mcl ops not classified |
Rakesh Pillai | d295d1e | 2019-09-11 08:00:36 +0530 | [diff] [blame] | 1092 | * @set_ibss_vdev_heart_beat_timer: Update ibss vdev heart beat timer |
| 1093 | * @set_wmm_param: set wmm parameters |
| 1094 | * @bad_peer_txctl_set_setting: configure bad peer tx limit setting |
| 1095 | * @bad_peer_txctl_update_threshold: configure bad peer tx threshold limit |
| 1096 | * @hl_tdls_flag_reset: reset tdls flag for vdev |
| 1097 | * @tx_non_std: Allow the control-path SW to send data frames |
| 1098 | * @get_vdev_id: get vdev id |
| 1099 | * @set_wisa_mode: set wisa mode for a vdev |
| 1100 | * @txrx_data_stall_cb_register: register data stall callback |
| 1101 | * @txrx_data_stall_cb_deregister: deregister data stall callback |
| 1102 | * @txrx_post_data_stall_event: post data stall event |
| 1103 | * @runtime_suspend: ensure TXRX is ready to runtime suspend |
| 1104 | * @runtime_resume: ensure TXRX is ready to runtime resume |
| 1105 | * @get_opmode: get operation mode of vdev |
| 1106 | * @mark_first_wakeup_packet: set flag to indicate that fw is compatible for |
| 1107 | marking first packet after wow wakeup |
| 1108 | * @update_mac_id: update mac_id for vdev |
| 1109 | * @flush_rx_frames: flush rx frames on the queue |
| 1110 | * @get_intra_bss_fwd_pkts_count: to get the total tx and rx packets that |
| 1111 | has been forwarded from txrx layer |
| 1112 | without going to upper layers |
| 1113 | * @pkt_log_init: handler to initialize packet log |
| 1114 | * @pkt_log_con_service: handler to connect packet log service |
| 1115 | * @get_num_rx_contexts: handler to get number of RX contexts |
| 1116 | * @register_packetdump_cb: register callback for different pktlog |
| 1117 | * @unregister_packetdump_cb: unregister callback for different pktlog |
| 1118 | * @pdev_reset_driver_del_ack: reset driver delayed ack enabled flag |
| 1119 | * @vdev_set_driver_del_ack_enable: set driver delayed ack enabled flag |
| 1120 | * |
| 1121 | * Function pointers for miscellaneous soc/pdev/vdev related operations. |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1122 | */ |
| 1123 | struct cdp_misc_ops { |
Rakesh Pillai | d295d1e | 2019-09-11 08:00:36 +0530 | [diff] [blame] | 1124 | uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_soc_t *soc_hdl, |
| 1125 | uint8_t vdev_id, |
| 1126 | uint16_t timer_value_sec); |
| 1127 | void (*set_wmm_param)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 1128 | struct ol_tx_wmm_param_t wmm_param); |
| 1129 | void (*bad_peer_txctl_set_setting)(struct cdp_soc_t *soc_hdl, |
| 1130 | uint8_t pdev_id, int enable, |
| 1131 | int period, int txq_limit); |
| 1132 | void (*bad_peer_txctl_update_threshold)(struct cdp_soc_t *soc_hdl, |
| 1133 | uint8_t pdev_id, |
| 1134 | int level, int tput_thresh, |
| 1135 | int tx_limit); |
| 1136 | void (*hl_tdls_flag_reset)(struct cdp_soc_t *soc_hdl, |
| 1137 | uint8_t vdev_id, bool flag); |
| 1138 | qdf_nbuf_t (*tx_non_std)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, |
| 1139 | enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list); |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 1140 | uint16_t (*get_vdev_id)(struct cdp_vdev *vdev); |
Rakesh Pillai | d295d1e | 2019-09-11 08:00:36 +0530 | [diff] [blame] | 1141 | uint32_t (*get_tx_ack_stats)(struct cdp_soc_t *soc_hdl, |
| 1142 | uint8_t vdev_id); |
| 1143 | QDF_STATUS (*set_wisa_mode)(struct cdp_soc_t *soc_hdl, |
| 1144 | uint8_t vdev_id, bool enable); |
| 1145 | QDF_STATUS (*txrx_data_stall_cb_register)(struct cdp_soc_t *soc_hdl, |
| 1146 | uint8_t pdev_id, |
| 1147 | data_stall_detect_cb cb); |
| 1148 | QDF_STATUS (*txrx_data_stall_cb_deregister)(struct cdp_soc_t *soc_hdl, |
| 1149 | uint8_t pdev_id, |
| 1150 | data_stall_detect_cb cb); |
Poddar, Siddarth | 5c57a89 | 2017-09-04 12:16:38 +0530 | [diff] [blame] | 1151 | void (*txrx_post_data_stall_event)( |
Rakesh Pillai | d295d1e | 2019-09-11 08:00:36 +0530 | [diff] [blame] | 1152 | struct cdp_soc_t *soc_hdl, |
Poddar, Siddarth | 5c57a89 | 2017-09-04 12:16:38 +0530 | [diff] [blame] | 1153 | enum data_stall_log_event_indicator indicator, |
| 1154 | enum data_stall_log_event_type data_stall_type, |
| 1155 | uint32_t pdev_id, uint32_t vdev_id_bitmap, |
| 1156 | enum data_stall_log_recovery_type recovery_type); |
Rakesh Pillai | d295d1e | 2019-09-11 08:00:36 +0530 | [diff] [blame] | 1157 | QDF_STATUS (*runtime_suspend)(struct cdp_soc_t *soc_hdl, |
| 1158 | uint8_t pdev_id); |
| 1159 | QDF_STATUS (*runtime_resume)(struct cdp_soc_t *soc_hdl, |
| 1160 | uint8_t pdev_id); |
| 1161 | int (*get_opmode)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id); |
| 1162 | void (*mark_first_wakeup_packet)(struct cdp_soc_t *soc_hdl, |
| 1163 | uint8_t pdev_id, uint8_t value); |
| 1164 | void (*update_mac_id)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, |
| 1165 | uint8_t mac_id); |
| 1166 | void (*flush_rx_frames)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 1167 | void *peer, bool drop); |
| 1168 | A_STATUS(*get_intra_bss_fwd_pkts_count)(struct cdp_soc_t *soc_hdl, |
| 1169 | uint8_t vdev_id, |
| 1170 | uint64_t *fwd_tx_packets, |
| 1171 | uint64_t *fwd_rx_packets); |
| 1172 | void (*pkt_log_init)(struct cdp_soc_t *soc_hdl, uint8_t pdev, |
| 1173 | void *scn); |
| 1174 | void (*pkt_log_con_service)(struct cdp_soc_t *soc_hdl, |
| 1175 | uint8_t pdev_id, void *scn); |
| 1176 | int (*get_num_rx_contexts)(struct cdp_soc_t *soc_hdl); |
| 1177 | void (*register_pktdump_cb)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 1178 | ol_txrx_pktdump_cb tx_cb, |
Lin Bai | 324f491 | 2018-12-13 16:13:24 +0800 | [diff] [blame] | 1179 | ol_txrx_pktdump_cb rx_cb); |
Rakesh Pillai | d295d1e | 2019-09-11 08:00:36 +0530 | [diff] [blame] | 1180 | void (*unregister_pktdump_cb)(struct cdp_soc_t *soc_hdl, |
| 1181 | uint8_t pdev_id); |
| 1182 | void (*pdev_reset_driver_del_ack)(struct cdp_soc_t *soc_hdl, |
| 1183 | uint8_t pdev_id); |
| 1184 | void (*vdev_set_driver_del_ack_enable)(struct cdp_soc_t *soc_hdl, |
| 1185 | uint8_t vdev_id, |
Tiger Yu | 6f1fc00 | 2019-04-25 10:41:30 +0800 | [diff] [blame] | 1186 | unsigned long rx_packets, |
| 1187 | uint32_t time_in_ms, |
| 1188 | uint32_t high_th, |
| 1189 | uint32_t low_th); |
Nirav Shah | aa6ca44 | 2019-11-13 18:17:05 +0530 | [diff] [blame] | 1190 | void (*vdev_set_bundle_require_flag)(uint8_t vdev_id, |
| 1191 | unsigned long tx_bytes, |
| 1192 | uint32_t time_in_ms, |
| 1193 | uint32_t high_th, |
| 1194 | uint32_t low_th); |
| 1195 | void (*pdev_reset_bundle_require_flag)(struct cdp_soc_t *soc_hdl, |
| 1196 | uint8_t pdev_id); |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1197 | }; |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 1198 | |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1199 | /** |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1200 | * struct cdp_ocb_ops - mcl ocb ops |
Rakesh Pillai | 5396b88 | 2019-07-07 00:36:41 +0530 | [diff] [blame] | 1201 | * @set_ocb_chan_info: set OCB channel info |
| 1202 | * @get_ocb_chan_info: get OCB channel info |
| 1203 | * |
| 1204 | * Function pointers for operations related to OCB. |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1205 | */ |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1206 | struct cdp_ocb_ops { |
Rakesh Pillai | 5396b88 | 2019-07-07 00:36:41 +0530 | [diff] [blame] | 1207 | void (*set_ocb_chan_info)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, |
| 1208 | struct ol_txrx_ocb_set_chan ocb_set_chan); |
| 1209 | struct ol_txrx_ocb_chan_info *(*get_ocb_chan_info)( |
| 1210 | struct cdp_soc_t *soc_hdl, uint8_t vdev_id); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1211 | }; |
| 1212 | |
| 1213 | /** |
| 1214 | * struct cdp_peer_ops - mcl peer related ops |
| 1215 | * @register_peer: |
| 1216 | * @clear_peer: |
| 1217 | * @cfg_attach: |
| 1218 | * @find_peer_by_addr: |
| 1219 | * @find_peer_by_addr_and_vdev: |
| 1220 | * @local_peer_id: |
| 1221 | * @peer_find_by_local_id: |
| 1222 | * @peer_state_update: |
| 1223 | * @get_vdevid: |
| 1224 | * @get_vdev_by_sta_id: |
| 1225 | * @register_ocb_peer: |
| 1226 | * @peer_get_peer_mac_addr: |
| 1227 | * @get_peer_state: |
| 1228 | * @get_vdev_for_peer: |
| 1229 | * @update_ibss_add_peer_num_of_vdev: |
| 1230 | * @remove_peers_for_vdev: |
| 1231 | * @remove_peers_for_vdev_no_lock: |
| 1232 | * @copy_mac_addr_raw: |
| 1233 | * @add_last_real_peer: |
| 1234 | * @is_vdev_restore_last_peer: |
| 1235 | * @update_last_real_peer: |
| 1236 | */ |
| 1237 | struct cdp_peer_ops { |
| 1238 | QDF_STATUS (*register_peer)(struct cdp_pdev *pdev, |
| 1239 | struct ol_txrx_desc_type *sta_desc); |
Rakshith Suresh Patkar | 0375108 | 2019-07-26 12:30:23 +0530 | [diff] [blame] | 1240 | QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, |
| 1241 | struct qdf_mac_addr peer_addr); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1242 | QDF_STATUS (*change_peer_state)(uint8_t sta_id, |
| 1243 | enum ol_txrx_peer_state sta_state, |
| 1244 | bool roam_synch_in_progress); |
| 1245 | void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev, |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 1246 | uint8_t *peer_addr, |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1247 | enum peer_debug_id_type debug_id); |
| 1248 | void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id); |
| 1249 | void * (*find_peer_by_addr)(struct cdp_pdev *pdev, |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 1250 | uint8_t *peer_addr); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1251 | void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev, |
| 1252 | struct cdp_vdev *vdev, |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 1253 | uint8_t *peer_addr); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1254 | QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev, |
| 1255 | uint8_t *peer_addr, |
| 1256 | enum ol_txrx_peer_state state); |
| 1257 | QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id); |
Rakshith Suresh Patkar | fb42ec3 | 2019-07-26 13:52:00 +0530 | [diff] [blame] | 1258 | struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev, |
| 1259 | struct qdf_mac_addr peer_addr); |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 1260 | QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1261 | uint8_t * (*peer_get_peer_mac_addr)(void *peer); |
| 1262 | int (*get_peer_state)(void *peer); |
| 1263 | struct cdp_vdev * (*get_vdev_for_peer)(void *peer); |
| 1264 | int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev, |
| 1265 | int16_t peer_num_delta); |
| 1266 | void (*remove_peers_for_vdev)(struct cdp_vdev *vdev, |
| 1267 | ol_txrx_vdev_peer_remove_cb callback, |
| 1268 | void *callback_context, bool remove_last_peer); |
| 1269 | void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev, |
| 1270 | ol_txrx_vdev_peer_remove_cb callback, |
| 1271 | void *callback_context); |
| 1272 | void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr); |
| 1273 | void (*add_last_real_peer)(struct cdp_pdev *pdev, |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 1274 | struct cdp_vdev *vdev); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1275 | bool (*is_vdev_restore_last_peer)(void *peer); |
| 1276 | void (*update_last_real_peer)(struct cdp_pdev *pdev, void *vdev, |
Yeshwanth Sriram Guntuka | 65d5477 | 2019-11-22 14:50:02 +0530 | [diff] [blame] | 1277 | bool restore_last_peer); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1278 | void (*peer_detach_force_delete)(void *peer); |
nakul kachhwaha | f9ae936 | 2019-10-24 17:46:02 +0530 | [diff] [blame] | 1279 | void (*set_tdls_offchan_enabled)(void *peer, bool val); |
| 1280 | void (*set_peer_as_tdls_peer)(void *peer, bool val); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1281 | }; |
| 1282 | |
| 1283 | /** |
Rakesh Pillai | 2b88f07 | 2019-07-09 14:37:28 +0530 | [diff] [blame] | 1284 | * struct cdp_mob_stats_ops - mcl mob stats ops |
| 1285 | * @clear_stats: handler to clear ol txrx stats |
| 1286 | * @stats: handler to update ol txrx stats |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1287 | */ |
| 1288 | struct cdp_mob_stats_ops { |
Rakesh Pillai | 2b88f07 | 2019-07-09 14:37:28 +0530 | [diff] [blame] | 1289 | QDF_STATUS(*clear_stats)(struct cdp_soc_t *soc_hdl, |
| 1290 | uint8_t pdev_id, uint8_t bitmap); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1291 | int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len); |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1292 | }; |
| 1293 | |
| 1294 | /** |
| 1295 | * struct cdp_pmf_ops - mcl protected management frame ops |
Vevek Venkatesan | dc1517e | 2019-09-16 23:52:28 +0530 | [diff] [blame] | 1296 | * @get_pn_info: handler to get pn info from peer |
| 1297 | * |
| 1298 | * Function pointers for pmf related operations. |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1299 | */ |
| 1300 | struct cdp_pmf_ops { |
Vevek Venkatesan | dc1517e | 2019-09-16 23:52:28 +0530 | [diff] [blame] | 1301 | void (*get_pn_info)(struct cdp_soc_t *soc, uint8_t *peer_mac, |
| 1302 | uint8_t vdev_id, uint8_t **last_pn_valid, |
| 1303 | uint64_t **last_pn, uint32_t **rmf_pn_replays); |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1304 | }; |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1305 | #endif |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1306 | |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1307 | |
| 1308 | #ifdef DP_FLOW_CTL |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1309 | /** |
| 1310 | * struct cdp_cfg_ops - mcl configuration ops |
Jiani Liu | 7067cd4 | 2019-05-09 11:17:51 +0800 | [diff] [blame] | 1311 | * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag |
| 1312 | * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag |
| 1313 | * @cfg_attach: hardcode the configuration parameters |
| 1314 | * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag |
| 1315 | * @is_rx_fwd_disabled: get the rx_fwd_disabled flag, |
| 1316 | * 1 enabled, 0 disabled. |
| 1317 | * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to |
| 1318 | * indicate that mgmt over wmi is enabled |
| 1319 | * or not, |
| 1320 | * 1 for enabled, 0 for disable |
| 1321 | * @is_high_latency: get device is high or low latency device, |
| 1322 | * 1 high latency bus, 0 low latency bus |
| 1323 | * @set_flow_control_parameters: set flow control parameters |
| 1324 | * @set_flow_steering: set flow_steering_enabled flag |
| 1325 | * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag |
| 1326 | * @set_new_htt_msg_format: set new_htt_msg_format flag |
| 1327 | * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag |
| 1328 | * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag |
| 1329 | * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag, |
| 1330 | * 1 enabled, 0 disabled. |
| 1331 | * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag, |
| 1332 | * 1 enabled, 0 disabled. |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1333 | */ |
| 1334 | struct cdp_cfg_ops { |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 1335 | void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev, |
| 1336 | uint8_t disable_rx_fwd); |
| 1337 | void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev, |
| 1338 | uint8_t val); |
| 1339 | struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param); |
| 1340 | void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val); |
| 1341 | uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev); |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1342 | void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value); |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 1343 | int (*is_high_latency)(struct cdp_cfg *cfg_pdev); |
| 1344 | void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev, |
| 1345 | void *param); |
| 1346 | void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val); |
Yu Wang | a3f76c5 | 2017-08-10 16:58:13 +0800 | [diff] [blame] | 1347 | void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val); |
jitiphil | e65cc2d | 2018-11-05 14:31:21 +0530 | [diff] [blame] | 1348 | void (*set_new_htt_msg_format)(uint8_t val); |
Alok Kumar | 2e254c5 | 2018-11-28 17:26:53 +0530 | [diff] [blame] | 1349 | void (*set_peer_unmap_conf_support)(bool val); |
| 1350 | bool (*get_peer_unmap_conf_support)(void); |
Jiani Liu | 7067cd4 | 2019-05-09 11:17:51 +0800 | [diff] [blame] | 1351 | void (*set_tx_compl_tsf64)(bool val); |
| 1352 | bool (*get_tx_compl_tsf64)(void); |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1353 | }; |
| 1354 | |
| 1355 | /** |
| 1356 | * struct cdp_flowctl_ops - mcl flow control |
Rakesh Pillai | dce0137 | 2019-06-28 19:11:23 +0530 | [diff] [blame] | 1357 | * @flow_pool_map_handler: handler to map flow_id and pool descriptors |
| 1358 | * @flow_pool_unmap_handler: handler to unmap flow_id and pool descriptors |
| 1359 | * @register_pause_cb: handler to register tx pause callback |
| 1360 | * @set_desc_global_pool_size: handler to set global pool size |
| 1361 | * @dump_flow_pool_info: handler to dump global and flow pool info |
| 1362 | * @tx_desc_thresh_reached: handler to set tx desc threshold |
| 1363 | * |
| 1364 | * Function pointers for operations related to flow control |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1365 | */ |
| 1366 | struct cdp_flowctl_ops { |
Manjunathappa Prakash | 38205cc | 2018-03-06 14:22:44 -0800 | [diff] [blame] | 1367 | QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc, |
Rakesh Pillai | dce0137 | 2019-06-28 19:11:23 +0530 | [diff] [blame] | 1368 | uint8_t pdev_id, |
Manjunathappa Prakash | 38205cc | 2018-03-06 14:22:44 -0800 | [diff] [blame] | 1369 | uint8_t vdev_id); |
| 1370 | void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc, |
Rakesh Pillai | dce0137 | 2019-06-28 19:11:23 +0530 | [diff] [blame] | 1371 | uint8_t pdev_id, |
Manjunathappa Prakash | 38205cc | 2018-03-06 14:22:44 -0800 | [diff] [blame] | 1372 | uint8_t vdev_id); |
Manjunathappa Prakash | ced7ea6 | 2017-07-02 03:02:15 -0700 | [diff] [blame] | 1373 | QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc, |
| 1374 | tx_pause_callback); |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1375 | void (*set_desc_global_pool_size)(uint32_t num_msdu_desc); |
Manjunathappa Prakash | 38205cc | 2018-03-06 14:22:44 -0800 | [diff] [blame] | 1376 | |
Rakesh Pillai | dce0137 | 2019-06-28 19:11:23 +0530 | [diff] [blame] | 1377 | void (*dump_flow_pool_info)(struct cdp_soc_t *soc_hdl); |
Sravan Kumar Kairam | b75565e | 2018-12-17 17:55:44 +0530 | [diff] [blame] | 1378 | |
Rakesh Pillai | dce0137 | 2019-06-28 19:11:23 +0530 | [diff] [blame] | 1379 | bool (*tx_desc_thresh_reached)(struct cdp_soc_t *soc_hdl, |
| 1380 | uint8_t vdev_id); |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1381 | }; |
| 1382 | |
| 1383 | /** |
| 1384 | * struct cdp_lflowctl_ops - mcl legacy flow control ops |
Rakesh Pillai | 2032554 | 2019-11-07 19:26:36 +0530 | [diff] [blame] | 1385 | * @register_tx_flow_control: Register tx flow control callback |
| 1386 | * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev |
| 1387 | * @set_vdev_os_queue_status: Set vdev queue status |
| 1388 | * @deregister_tx_flow_control_cb: Deregister tx flow control callback |
| 1389 | * @flow_control_cb: Call osif flow control callback |
| 1390 | * @get_tx_resource: Get tx resources and comapre with watermark |
| 1391 | * @ll_set_tx_pause_q_depth: set pause queue depth |
| 1392 | * @vdev_flush: Flush all packets on a particular vdev |
| 1393 | * @vdev_pause: Pause a particular vdev |
| 1394 | * @vdev_unpause: Unpause a particular vdev |
| 1395 | * |
| 1396 | * Function pointers for operations related to flow control |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1397 | */ |
| 1398 | struct cdp_lflowctl_ops { |
Ajit Pal Singh | d1543e0 | 2018-04-19 15:02:22 +0530 | [diff] [blame] | 1399 | #ifdef QCA_HL_NETDEV_FLOW_CONTROL |
Rakesh Pillai | 2032554 | 2019-11-07 19:26:36 +0530 | [diff] [blame] | 1400 | int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl, |
Rakesh Pillai | d295d1e | 2019-09-11 08:00:36 +0530 | [diff] [blame] | 1401 | uint8_t pdev_id, |
Ajit Pal Singh | d1543e0 | 2018-04-19 15:02:22 +0530 | [diff] [blame] | 1402 | tx_pause_callback flowcontrol); |
Rakesh Pillai | 2032554 | 2019-11-07 19:26:36 +0530 | [diff] [blame] | 1403 | int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl, |
Yue Ma | 9c43a47 | 2019-11-12 12:51:02 -0800 | [diff] [blame^] | 1404 | uint8_t vdev_id, uint32_t chan_freq); |
Rakesh Pillai | 2032554 | 2019-11-07 19:26:36 +0530 | [diff] [blame] | 1405 | int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl, |
| 1406 | uint8_t vdev_id, |
Ajit Pal Singh | 506c4d6 | 2018-04-25 16:59:19 +0530 | [diff] [blame] | 1407 | enum netif_action_type action); |
Ajit Pal Singh | d1543e0 | 2018-04-19 15:02:22 +0530 | [diff] [blame] | 1408 | #else |
Rakesh Pillai | 2032554 | 2019-11-07 19:26:36 +0530 | [diff] [blame] | 1409 | int (*register_tx_flow_control)( |
| 1410 | struct cdp_soc_t *soc_hdl, |
| 1411 | uint8_t vdev_id, |
bings | 4dcaf8b | 2017-08-11 10:37:46 +0800 | [diff] [blame] | 1412 | ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx, |
| 1413 | ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause); |
Ajit Pal Singh | d1543e0 | 2018-04-19 15:02:22 +0530 | [diff] [blame] | 1414 | #endif /* QCA_HL_NETDEV_FLOW_CONTROL */ |
Rakesh Pillai | 2032554 | 2019-11-07 19:26:36 +0530 | [diff] [blame] | 1415 | int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl, |
| 1416 | uint8_t vdev_id); |
| 1417 | void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, |
| 1418 | bool tx_resume); |
| 1419 | bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 1420 | struct qdf_mac_addr peer_addr, |
| 1421 | unsigned int low_watermark, |
| 1422 | unsigned int high_watermark_offset); |
| 1423 | int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id, |
| 1424 | int pause_q_depth); |
| 1425 | void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id); |
| 1426 | void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, |
| 1427 | uint32_t reason, uint32_t pause_type); |
| 1428 | void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, |
| 1429 | uint32_t reason, uint32_t pause_type); |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1430 | }; |
| 1431 | |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1432 | /** |
Rakesh Pillai | 20e302a | 2019-07-08 16:22:56 +0530 | [diff] [blame] | 1433 | * struct cdp_throttle_ops - mcl throttle ops |
| 1434 | * @throttle_init_period: handler to initialize tx throttle time |
| 1435 | * @throttle_set_level: handler to set tx throttle level |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1436 | */ |
| 1437 | struct cdp_throttle_ops { |
Rakesh Pillai | 20e302a | 2019-07-08 16:22:56 +0530 | [diff] [blame] | 1438 | void (*throttle_init_period)(struct cdp_soc_t *soc_hdl, |
| 1439 | uint8_t pdev_id, int period, |
| 1440 | uint8_t *dutycycle_level); |
| 1441 | void (*throttle_set_level)(struct cdp_soc_t *soc_hdl, |
| 1442 | uint8_t pdev_id, int level); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1443 | }; |
| 1444 | #endif |
| 1445 | |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 1446 | #ifdef IPA_OFFLOAD |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1447 | /** |
| 1448 | * struct cdp_ipa_ops - mcl ipa data path ops |
| 1449 | * @ipa_get_resource: |
| 1450 | * @ipa_set_doorbell_paddr: |
| 1451 | * @ipa_set_active: |
| 1452 | * @ipa_op_response: |
| 1453 | * @ipa_register_op_cb: |
| 1454 | * @ipa_get_stat: |
| 1455 | * @ipa_tx_data_frame: |
| 1456 | */ |
| 1457 | struct cdp_ipa_ops { |
Vevek Venkatesan | 2cc8c5d | 2019-08-22 16:29:46 +0530 | [diff] [blame] | 1458 | QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl, |
| 1459 | uint8_t pdev_id); |
| 1460 | QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_soc_t *soc_hdl, |
| 1461 | uint8_t pdev_id); |
| 1462 | QDF_STATUS (*ipa_set_active)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 1463 | bool uc_active, bool is_tx); |
| 1464 | QDF_STATUS (*ipa_op_response)(struct cdp_soc_t *soc_hdl, |
| 1465 | uint8_t pdev_id, uint8_t *op_msg); |
| 1466 | QDF_STATUS (*ipa_register_op_cb)(struct cdp_soc_t *soc_hdl, |
| 1467 | uint8_t pdev_id, |
| 1468 | void (*ipa_uc_op_cb_type) |
| 1469 | (uint8_t *op_msg, void *osif_ctxt), |
| 1470 | void *usr_ctxt); |
| 1471 | QDF_STATUS (*ipa_get_stat)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); |
| 1472 | qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_soc_t *soc_hdl, |
| 1473 | uint8_t vdev_id, qdf_nbuf_t skb); |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 1474 | void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev, |
Venkata Sharath Chandra Manchala | f2a125a | 2016-11-28 18:10:11 -0800 | [diff] [blame] | 1475 | uint32_t value); |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 1476 | #ifdef FEATURE_METERING |
Vevek Venkatesan | 2cc8c5d | 2019-08-22 16:29:46 +0530 | [diff] [blame] | 1477 | QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_soc_t *soc_hdl, |
| 1478 | uint8_t pdev_id, |
| 1479 | uint8_t reset_stats); |
| 1480 | QDF_STATUS (*ipa_uc_set_quota)(struct cdp_soc_t *soc_hdl, |
| 1481 | uint8_t pdev_id, uint64_t quota_bytes); |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 1482 | #endif |
Vevek Venkatesan | 2cc8c5d | 2019-08-22 16:29:46 +0530 | [diff] [blame] | 1483 | QDF_STATUS (*ipa_enable_autonomy)(struct cdp_soc_t *soc_hdl, |
| 1484 | uint8_t pdev_id); |
| 1485 | QDF_STATUS (*ipa_disable_autonomy)(struct cdp_soc_t *soc_hdl, |
| 1486 | uint8_t pdev_id); |
Yun Park | 1ba3ada | 2018-01-11 11:38:41 -0800 | [diff] [blame] | 1487 | #ifdef CONFIG_IPA_WDI_UNIFIED_API |
Vevek Venkatesan | 2cc8c5d | 2019-08-22 16:29:46 +0530 | [diff] [blame] | 1488 | QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 1489 | void *ipa_i2w_cb, void *ipa_w2i_cb, |
| 1490 | void *ipa_wdi_meter_notifier_cb, |
| 1491 | uint32_t ipa_desc_size, void *ipa_priv, |
| 1492 | bool is_rm_enabled, uint32_t *tx_pipe_handle, |
| 1493 | uint32_t *rx_pipe_handle, bool is_smmu_enabled, |
| 1494 | qdf_ipa_sys_connect_params_t *sys_in, |
| 1495 | bool over_gsi); |
Yun Park | 1ba3ada | 2018-01-11 11:38:41 -0800 | [diff] [blame] | 1496 | #else /* CONFIG_IPA_WDI_UNIFIED_API */ |
Vevek Venkatesan | 2cc8c5d | 2019-08-22 16:29:46 +0530 | [diff] [blame] | 1497 | QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 1498 | void *ipa_i2w_cb, void *ipa_w2i_cb, |
| 1499 | void *ipa_wdi_meter_notifier_cb, |
| 1500 | uint32_t ipa_desc_size, void *ipa_priv, |
| 1501 | bool is_rm_enabled, uint32_t *tx_pipe_handle, |
| 1502 | uint32_t *rx_pipe_handle); |
Yun Park | 1ba3ada | 2018-01-11 11:38:41 -0800 | [diff] [blame] | 1503 | #endif /* CONFIG_IPA_WDI_UNIFIED_API */ |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 1504 | QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle, |
| 1505 | uint32_t rx_pipe_handle); |
| 1506 | QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr, |
Yun Park | fd269b5 | 2017-10-05 14:41:32 -0700 | [diff] [blame] | 1507 | qdf_ipa_client_type_t prod_client, |
| 1508 | qdf_ipa_client_type_t cons_client, |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 1509 | uint8_t session_id, bool is_ipv6_enabled); |
| 1510 | QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled); |
Vevek Venkatesan | 2cc8c5d | 2019-08-22 16:29:46 +0530 | [diff] [blame] | 1511 | QDF_STATUS (*ipa_enable_pipes)(struct cdp_soc_t *soc_hdl, |
| 1512 | uint8_t pdev_id); |
| 1513 | QDF_STATUS (*ipa_disable_pipes)(struct cdp_soc_t *soc_hdl, |
| 1514 | uint8_t pdev_id); |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 1515 | QDF_STATUS (*ipa_set_perf_level)(int client, |
| 1516 | uint32_t max_supported_bw_mbps); |
Vevek Venkatesan | 2cc8c5d | 2019-08-22 16:29:46 +0530 | [diff] [blame] | 1517 | bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, |
| 1518 | qdf_nbuf_t nbuf, bool *fwd_success); |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1519 | }; |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 1520 | #endif |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1521 | |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1522 | #ifdef DP_POWER_SAVE |
| 1523 | /** |
| 1524 | * struct cdp_tx_delay_ops - mcl tx delay ops |
Rakesh Pillai | a0a2fe5 | 2019-07-04 20:11:58 +0530 | [diff] [blame] | 1525 | * @tx_delay: handler to get tx packet delay |
| 1526 | * @tx_delay_hist: handler to get tx packet delay histogram |
| 1527 | * @tx_packet_count: handler to get tx packet count |
| 1528 | * @tx_set_compute_interval: update compute interval period for TSM stats |
| 1529 | * |
| 1530 | * Function pointer for operations related to tx delay. |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1531 | */ |
| 1532 | struct cdp_tx_delay_ops { |
Rakesh Pillai | a0a2fe5 | 2019-07-04 20:11:58 +0530 | [diff] [blame] | 1533 | void (*tx_delay)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 1534 | uint32_t *queue_delay_microsec, |
| 1535 | uint32_t *tx_delay_microsec, int category); |
| 1536 | void (*tx_delay_hist)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 1537 | uint16_t *bin_values, int category); |
| 1538 | void (*tx_packet_count)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, |
| 1539 | uint16_t *out_packet_count, |
| 1540 | uint16_t *out_packet_loss_count, int category); |
| 1541 | void (*tx_set_compute_interval)(struct cdp_soc_t *soc_hdl, |
| 1542 | uint8_t pdev_id, uint32_t interval); |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1543 | }; |
| 1544 | |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1545 | /** |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1546 | * struct cdp_bus_ops - mcl bus suspend/resume ops |
Rakesh Pillai | 1d4d12e | 2019-09-13 04:15:08 +0530 | [diff] [blame] | 1547 | * @bus_suspend: handler for bus suspend |
| 1548 | * @bus_resume: handler for bus resume |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1549 | */ |
| 1550 | struct cdp_bus_ops { |
Rakesh Pillai | 1d4d12e | 2019-09-13 04:15:08 +0530 | [diff] [blame] | 1551 | QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); |
| 1552 | QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1553 | }; |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1554 | #endif |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 1555 | |
Manjunathappa Prakash | 56023f5 | 2018-03-28 20:05:56 -0700 | [diff] [blame] | 1556 | #ifdef RECEIVE_OFFLOAD |
| 1557 | /** |
Mohit Khanna | 16816ae | 2018-10-30 14:12:03 -0700 | [diff] [blame] | 1558 | * struct cdp_rx_offld_ops - mcl host receive offload ops |
Manjunathappa Prakash | 56023f5 | 2018-03-28 20:05:56 -0700 | [diff] [blame] | 1559 | * @register_rx_offld_flush_cb: |
| 1560 | * @deregister_rx_offld_flush_cb: |
| 1561 | */ |
| 1562 | struct cdp_rx_offld_ops { |
| 1563 | void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *)); |
| 1564 | void (*deregister_rx_offld_flush_cb)(void); |
| 1565 | }; |
| 1566 | #endif |
| 1567 | |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 1568 | struct cdp_ops { |
| 1569 | struct cdp_cmn_ops *cmn_drv_ops; |
| 1570 | struct cdp_ctrl_ops *ctrl_ops; |
| 1571 | struct cdp_me_ops *me_ops; |
| 1572 | struct cdp_mon_ops *mon_ops; |
| 1573 | struct cdp_host_stats_ops *host_stats_ops; |
| 1574 | struct cdp_wds_ops *wds_ops; |
| 1575 | struct cdp_raw_ops *raw_ops; |
| 1576 | struct cdp_pflow_ops *pflow_ops; |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1577 | #ifdef DP_PEER_EXTENDED_API |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1578 | struct cdp_misc_ops *misc_ops; |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1579 | struct cdp_peer_ops *peer_ops; |
| 1580 | struct cdp_ocb_ops *ocb_ops; |
| 1581 | struct cdp_mob_stats_ops *mob_stats_ops; |
| 1582 | struct cdp_pmf_ops *pmf_ops; |
| 1583 | #endif |
| 1584 | #ifdef DP_FLOW_CTL |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1585 | struct cdp_cfg_ops *cfg_ops; |
| 1586 | struct cdp_flowctl_ops *flowctl_ops; |
| 1587 | struct cdp_lflowctl_ops *l_flowctl_ops; |
Vevek Venkatesan | de31ff6 | 2019-06-11 12:50:49 +0530 | [diff] [blame] | 1588 | struct cdp_throttle_ops *throttle_ops; |
| 1589 | #endif |
| 1590 | #ifdef DP_POWER_SAVE |
| 1591 | struct cdp_bus_ops *bus_ops; |
| 1592 | struct cdp_tx_delay_ops *delay_ops; |
| 1593 | #endif |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 1594 | #ifdef IPA_OFFLOAD |
Leo Chang | db6358c | 2016-09-27 17:00:52 -0700 | [diff] [blame] | 1595 | struct cdp_ipa_ops *ipa_ops; |
Yun Park | fde6b9e | 2017-06-26 17:13:11 -0700 | [diff] [blame] | 1596 | #endif |
Manjunathappa Prakash | 56023f5 | 2018-03-28 20:05:56 -0700 | [diff] [blame] | 1597 | #ifdef RECEIVE_OFFLOAD |
| 1598 | struct cdp_rx_offld_ops *rx_offld_ops; |
| 1599 | #endif |
Alok Kumar | 3d15ae8 | 2019-08-15 20:56:40 +0530 | [diff] [blame] | 1600 | #ifdef WLAN_FEATURE_PKT_CAPTURE |
| 1601 | struct cdp_pktcapture_ops *pktcapture_ops; |
| 1602 | #endif |
| 1603 | |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 1604 | }; |
Nandha Kishore Easwaran | e5444bc | 2016-10-20 13:23:23 +0530 | [diff] [blame] | 1605 | #endif |