blob: f5e5a5bc23294e6fafbb76e76da4d146d25322a6 [file] [log] [blame]
/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"
#include <wmi_unified_api.h>
#include <wdi_event_api.h>

#ifdef IPA_OFFLOAD
#ifdef CONFIG_IPA_WDI_UNIFIED_API
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL 0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1

struct hif_opaque_softc;

/*
 * enum cdp_nac_param_cmd - neighbour-AP-client (NAC) filter commands.
 * Values mirror the corresponding ieee80211_nac_param enum so they can
 * be passed through from the control layer unchanged.
 */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/

66struct cdp_cmn_ops {
67
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -070068 QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053069
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080070 int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053071
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080072 struct cdp_vdev *(*txrx_vdev_attach)
73 (struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
Rakesh Pillai01b9b682019-07-27 18:58:21 -070074 uint8_t vdev_id, enum wlan_op_mode op_mode,
75 enum wlan_op_subtype subtype);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053076
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080077 void (*txrx_vdev_detach)
78 (struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053079 void *cb_context);
80
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080081 struct cdp_pdev *(*txrx_pdev_attach)
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +053082 (ol_txrx_soc_handle soc, HTC_HANDLE htc_pdev,
83 qdf_device_t osdev, uint8_t pdev_id);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053084
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080085 int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);
Leo Changdb6358c2016-09-27 17:00:52 -070086
Himanshu Agarwalb7e3c982017-02-23 16:26:33 +053087 void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);
88
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080089 void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053090
Anish Nataraje9d4c3b2018-11-24 22:24:56 +053091 /**
92 * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
93 * @pdev: Dp pdev handle
94 * @force: Force deinit or not
95 *
96 * Return: None
97 */
98 void (*txrx_pdev_deinit)(struct cdp_pdev *pdev, int force);
99
Dhanashri Atre6d90ef32016-11-10 16:27:38 -0800100 void *(*txrx_peer_create)
Pavankumar Nandeshwar715fdc32019-10-03 20:51:01 +0530101 (struct cdp_vdev *vdev, uint8_t *peer_mac_addr);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530102
Dhanashri Atre6d90ef32016-11-10 16:27:38 -0800103 void (*txrx_peer_setup)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800104 (struct cdp_vdev *vdev_hdl, void *peer_hdl);
Dhanashri Atre6d90ef32016-11-10 16:27:38 -0800105
Pavankumar Nandeshwar1ab908e2019-01-24 12:53:13 +0530106 void (*txrx_cp_peer_del_response)
107 (ol_txrx_soc_handle soc, struct cdp_vdev *vdev_hdl,
108 uint8_t *peer_mac_addr);
109
Dhanashri Atre6d90ef32016-11-10 16:27:38 -0800110 void (*txrx_peer_teardown)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800111 (struct cdp_vdev *vdev_hdl, void *peer_hdl);
Dhanashri Atre6d90ef32016-11-10 16:27:38 -0800112
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530113 int (*txrx_peer_add_ast)
114 (ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
115 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
116 uint32_t flags);
117
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530118 int (*txrx_peer_update_ast)
119 (ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530120 uint8_t *mac_addr, uint32_t flags);
121
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530122 bool (*txrx_peer_get_ast_info_by_soc)
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530123 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530124 struct cdp_ast_entry_info *ast_entry_info);
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530125
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530126 bool (*txrx_peer_get_ast_info_by_pdev)
127 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
128 uint8_t pdev_id,
129 struct cdp_ast_entry_info *ast_entry_info);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530130
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530131 QDF_STATUS (*txrx_peer_ast_delete_by_soc)
132 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
133 txrx_ast_free_cb callback,
134 void *cookie);
Tallapragada Kalyan57b6bb32018-01-02 12:58:33 +0530135
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +0530136 QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
137 (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
138 uint8_t pdev_id,
139 txrx_ast_free_cb callback,
140 void *cookie);
Chaithanya Garrepallicf347d12018-09-18 14:28:55 +0530141
Naveen Rawat761329b2017-09-19 10:30:11 -0700142 void (*txrx_peer_delete)(void *peer, uint32_t bitmap);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530143
Pavankumar Nandeshwar753eed32019-01-22 15:40:15 +0530144 void (*txrx_vdev_flush_peers)(struct cdp_vdev *vdev, bool unmap_only);
145
Venkata Sharath Chandra Manchala87479582018-08-01 12:45:34 -0700146 QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
147 uint8_t smart_monitor);
Alok Kumar2e254c52018-11-28 17:26:53 +0530148 void (*txrx_peer_delete_sync)(void *peer,
149 QDF_STATUS(*delete_cb)(
150 uint8_t vdev_id,
151 uint32_t peerid_cnt,
152 uint16_t *peerid_list),
153 uint32_t bitmap);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530154
Alok Kumar14b3ba02019-02-14 14:37:02 +0530155 void (*txrx_peer_unmap_sync_cb_set)(struct cdp_pdev *pdev,
156 QDF_STATUS(*unmap_resp_cb)(
157 uint8_t vdev_id,
158 uint32_t peerid_cnt,
159 uint16_t *peerid_list));
160
phadiman7821bf82018-02-06 16:03:54 +0530161 uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);
Vivek Natarajan95f004f2019-01-10 22:15:46 +0530162 bool (*txrx_get_vow_config_frm_pdev)(struct cdp_pdev *pdev);
phadiman7821bf82018-02-06 16:03:54 +0530163
Adil Saeed Musthafa61a21692018-07-17 20:49:31 -0700164 void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev,
165 int16_t chan_noise_floor);
166
phadiman7821bf82018-02-06 16:03:54 +0530167 void (*txrx_set_nac)(struct cdp_peer *peer);
168
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +0530169 /**
170 * txrx_set_pdev_tx_capture() - callback to set pdev tx_capture
171 * @soc: opaque soc handle
172 * @pdev: data path pdev handle
173 * @val: value of pdev_tx_capture
174 *
175 * Return: status: 0 - Success, non-zero: Failure
176 */
177 QDF_STATUS (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);
phadiman7821bf82018-02-06 16:03:54 +0530178
179 void (*txrx_get_peer_mac_from_peer_id)
180 (struct cdp_pdev *pdev_handle,
181 uint32_t peer_id, uint8_t *peer_mac);
182
183 void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);
184
185 void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);
186
Chaithanya Garrepalli2faa46f2018-04-09 12:34:20 +0530187 void (*txrx_ath_getstats)(void *pdev,
188 struct cdp_dev_stats *stats, uint8_t type);
phadiman7821bf82018-02-06 16:03:54 +0530189
190 void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
191 u_int8_t *user_position);
192
193 uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);
194
195 void (*txrx_if_mgmt_drain)(void *ni, int force);
196
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800197 void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530198
199 void (*txrx_set_privacy_filters)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800200 (struct cdp_vdev *vdev, void *filter, uint32_t num);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530201
jitiphil60ac9aa2018-10-05 19:54:04 +0530202 uint32_t (*txrx_get_cfg)(void *soc, enum cdp_dp_cfg cfg);
203
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530204 /********************************************************************
205 * Data Interface (B Interface)
206 ********************************************************************/
207
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800208 void (*txrx_vdev_register)(struct cdp_vdev *vdev,
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +0530209 void *osif_vdev,
Akshay Kosigidbbaef42018-05-03 23:39:27 +0530210 struct ol_txrx_ops *txrx_ops);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530211
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800212 int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530213 qdf_nbuf_t tx_mgmt_frm, uint8_t type);
214
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800215 int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530216 qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps,
217 uint16_t chanfreq);
218
219 /**
220 * ol_txrx_mgmt_tx_cb - tx management delivery notification
221 * callback function
222 */
223
Sravan Kumar Kairam786886b2017-07-19 17:38:20 +0530224 void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
225 ol_txrx_mgmt_tx_cb download_cb,
226 ol_txrx_mgmt_tx_cb ota_ack_cb,
227 void *ctxt);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530228
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800229 int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530230
231 /**
232 * ol_txrx_data_tx_cb - Function registered with the data path
233 * that is called when tx frames marked as "no free" are
234 * done being transmitted
235 */
236
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800237 void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530238 ol_txrx_data_tx_cb callback, void *ctxt);
239
240 /*******************************************************************
Jeff Johnsonff2dfb22018-05-12 10:27:57 -0700241 * Statistics and Debugging Interface (C Interface)
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530242 ********************************************************************/
243
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800244 int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530245 int max_subfrms_amsdu);
246
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800247 A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
248 struct ol_txrx_stats_req *req,
Leo Changdb6358c2016-09-27 17:00:52 -0700249 bool per_vdev, bool response_expected);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530250
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800251 int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530252
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800253 void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530254 uint8_t cfg_stats_type, uint32_t cfg_val);
255
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800256 void (*txrx_print_level_set)(unsigned level);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530257
258 /**
259 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
260 * @vdev: vdev handle
261 *
262 * Return: vdev mac address
263 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800264 uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530265
266 /**
267 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
268 * vdev
269 * @vdev: vdev handle
270 *
271 * Return: Handle to struct qdf_mac_addr
272 */
273 struct qdf_mac_addr *
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800274 (*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530275
276 /**
277 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
278 * @vdev: vdev handle
279 *
280 * Return: Handle to pdev
281 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800282 struct cdp_pdev *(*txrx_get_pdev_from_vdev)
283 (struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530284
285 /**
286 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
287 * @vdev: vdev handle
288 *
289 * Return: Handle to control pdev
290 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800291 struct cdp_cfg *
292 (*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530293
chenguo2a733792018-11-01 16:10:38 +0800294 /**
295 * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
296 * @pdev: pdev handle
297 *
298 * Return: Handle to vdev
299 */
300 struct cdp_vdev *
301 (*txrx_get_mon_vdev_from_pdev)(struct cdp_pdev *pdev);
302
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800303 struct cdp_vdev *
304 (*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
305 uint8_t vdev_id);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530306
307 void (*txrx_soc_detach)(void *soc);
308
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530309 /**
310 * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
311 * @soc: Opaque Dp handle
312 *
313 * Return: None
314 */
315 void (*txrx_soc_deinit)(void *soc);
316
317 /**
318 * txrx_soc_init() - Initialize dp soc and dp ring memory
319 * @soc: Opaque Dp handle
320 * @htchdl: Opaque htc handle
321 * @hifhdl: Opaque hif handle
322 *
323 * Return: None
324 */
Akshay Kosigieec6db92019-07-02 14:25:54 +0530325 void *(*txrx_soc_init)(void *soc,
326 struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
Akshay Kosigi4002f762019-07-08 23:04:36 +0530327 struct hif_opaque_softc *hif_handle,
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530328 HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
329 struct ol_if_ops *ol_ops, uint16_t device_id);
330
331 /**
332 * txrx_tso_soc_attach() - TSO attach handler triggered during
333 * dynamic tso activation
334 * @soc: Opaque Dp handle
335 *
336 * Return: QDF status
337 */
338 QDF_STATUS (*txrx_tso_soc_attach)(void *soc);
339
340 /**
341 * txrx_tso_soc_detach() - TSO detach handler triggered during
342 * dynamic tso de-activation
343 * @soc: Opaque Dp handle
344 *
345 * Return: QDF status
346 */
347 QDF_STATUS (*txrx_tso_soc_detach)(void *soc);
Sumedh Baikady1c61e062018-02-12 22:25:47 -0800348 int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid,
349 int status);
350
Karunakar Dasinenied1de122016-08-02 11:57:59 -0700351 int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
Sumedh Baikady1c61e062018-02-12 22:25:47 -0800352 uint16_t tid, uint16_t batimeout,
353 uint16_t buffersize,
354 uint16_t startseqnum);
Karunakar Dasinenied1de122016-08-02 11:57:59 -0700355
356 void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
357 uint8_t *dialogtoken, uint16_t *statuscode,
358 uint16_t *buffersize, uint16_t *batimeout);
359
360 int (*delba_process)(void *peer_handle,
361 int tid, uint16_t reasoncode);
Ishank Jain1e7401c2017-02-17 15:38:39 +0530362
sumedh baikadydf4a57c2018-04-08 22:19:22 -0700363 /**
364 * delba_tx_completion() - Indicate delba tx status
365 * @peer_handle: Peer handle
366 * @tid: Tid number
367 * @status: Tx completion status
368 *
369 * Return: 0 on Success, 1 on failure
370 */
371 int (*delba_tx_completion)(void *peer_handle,
372 uint8_t tid, int status);
373
Gyanranjan Hazarika99a58d32017-12-22 21:56:17 -0800374 void (*set_addba_response)(void *peer_handle,
375 uint8_t tid, uint16_t statuscode);
376
Ishank Jain1e7401c2017-02-17 15:38:39 +0530377 uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
378 uint16_t peer_id, uint8_t *mac_addr);
Ishank Jain949674c2017-02-27 17:09:29 +0530379
380 void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
381 uint8_t map_id);
Pranita Solanke92096e42018-09-11 11:14:51 +0530382 int (*txrx_get_total_per)(struct cdp_pdev *pdev_handle);
Ishank Jain949674c2017-02-27 17:09:29 +0530383
Manikandan Mohane2fa8b72017-03-22 11:18:26 -0700384 void (*flush_cache_rx_queue)(void);
Ishank Jain949674c2017-02-27 17:09:29 +0530385 void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
386 uint8_t tos, uint8_t tid);
Shashikala Prabhu8f6703b2018-10-31 09:43:00 +0530387 void (*hmmc_tid_override_en)(struct cdp_pdev *pdev, bool val);
388 void (*set_hmmc_tid_val)(struct cdp_pdev *pdev, uint8_t tid);
Ishank Jain949674c2017-02-27 17:09:29 +0530389
Rakesh Pillaie5430cb2019-11-06 16:48:53 +0530390 QDF_STATUS(*txrx_stats_request)(struct cdp_soc_t *soc_handle,
391 uint8_t vdev_id,
392 struct cdp_txrx_stats_req *req);
Om Prakash Tripathi03efb6a2017-08-23 22:51:28 +0530393
Mohit Khanna90d7ebd2017-09-12 21:54:21 -0700394 QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
395 enum qdf_stats_verbosity_level level);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +0530396 void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);
397
398 int(*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
Venkateswara Swamy Bandarua95b3242017-05-19 20:20:30 +0530399 QDF_STATUS (*txrx_intr_attach)(void *soc);
400 void (*txrx_intr_detach)(void *soc);
Gurumoorthi Gnanasambandhaned4bcf82017-05-24 00:10:59 +0530401 void (*set_pn_check)(struct cdp_vdev *vdev,
402 struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
403 uint32_t *rx_pn);
Venkata Sharath Chandra Manchala3e8add82017-07-10 11:59:54 -0700404 QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
405 struct cdp_config_params *params);
Santosh Anbu2280e862018-01-03 22:25:53 +0530406
407 void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +0530408 void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
409 void *dp_txrx_hdl);
410
411 void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
412 void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
413 void *dp_txrx_handle);
414
Padma Raghunathan93549e12019-02-28 14:30:55 +0530415 void (*map_pdev_to_lmac)(struct cdp_pdev *pdev_hdl,
416 uint32_t lmac_id);
417
Gyanranjan Hazarikae8047262019-06-05 00:43:38 -0700418 void (*set_pdev_status_down)(struct cdp_pdev *pdev_hdl, bool is_pdev_down);
419
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530420 void (*txrx_peer_reset_ast)
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +0530421 (ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
422 uint8_t *peer_macaddr, void *vdev_hdl);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530423
Santosh Anbu76693bc2018-04-23 16:38:54 +0530424 void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
425 void *vdev_hdl);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530426
427 void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
sumedh baikady1f8f3192018-02-20 17:30:32 -0800428 void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
429 uint8_t ac, uint32_t value);
430 void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
431 uint8_t ac, uint32_t *value);
Ruchi, Agrawal89219d92018-02-26 16:43:06 +0530432
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +0530433 QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +0530434 uint32_t num_peers,
Tallapragada Kalyana7023622018-12-03 19:29:52 +0530435 uint32_t max_ast_index,
Chaithanya Garrepalli3e93e5f2018-09-12 17:02:31 +0530436 bool peer_map_unmap_v2);
Chaithanya Garrepalli2f572792018-04-11 17:49:28 +0530437
Pamidipati, Vijayd3478ef2018-02-06 23:52:29 +0530438 ol_txrx_tx_fp tx_send;
Mohit Khanna7ac554b2018-05-24 11:58:13 -0700439 /**
440 * txrx_get_os_rx_handles_from_vdev() - Return function, osif vdev
441 * to deliver pkt to stack.
442 * @vdev: vdev handle
443 * @stack_fn: pointer to - function pointer to deliver RX pkt to stack
444 * @osif_vdev: pointer to - osif vdev to deliver RX packet to.
445 */
446 void (*txrx_get_os_rx_handles_from_vdev)
447 (struct cdp_vdev *vdev,
448 ol_txrx_rx_fp *stack_fn,
449 ol_osif_vdev_handle *osif_vdev);
Pranita Solankeafcd0f12018-08-29 22:49:23 +0530450 int (*txrx_classify_update)
451 (struct cdp_vdev *vdev, qdf_nbuf_t skb,
452 enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
Akshay Kosigia4f6e172018-09-03 21:42:27 +0530453
454 bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
455 enum cdp_capabilities dp_caps);
Amir Patel256dcbe2019-02-26 21:49:24 +0530456 void (*set_rate_stats_ctx)(struct cdp_soc_t *soc, void *ctx);
457 void* (*get_rate_stats_ctx)(struct cdp_soc_t *soc);
458 void (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
459 struct cdp_pdev *pdev,
460 void *buf);
461 void (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
462 struct cdp_pdev *pdev);
Debasis Dasa3249bd2019-03-01 11:58:22 +0530463 QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_pdev *pdev,
464 uint8_t pcp, uint8_t tid);
465 QDF_STATUS (*set_pdev_tidmap_prty)(struct cdp_pdev *pdev, uint8_t prty);
466 QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_vdev *vdev,
467 uint8_t pcp, uint8_t tid);
468 QDF_STATUS (*set_vdev_tidmap_prty)(struct cdp_vdev *vdev, uint8_t prty);
469 QDF_STATUS (*set_vdev_tidmap_tbl_id)(struct cdp_vdev *vdev,
470 uint8_t mapid);
Varsha Mishra6e1760c2019-07-27 22:51:42 +0530471#ifdef QCA_MULTIPASS_SUPPORT
472 QDF_STATUS (*set_vlan_groupkey)(struct cdp_vdev *vdev_handle,
473 uint16_t vlan_id, uint16_t group_key);
474#endif
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530475};
476
477struct cdp_ctrl_ops {
478
479 int
480 (*txrx_mempools_attach)(void *ctrl_pdev);
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +0530481
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530482 int
483 (*txrx_set_filter_neighbour_peers)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800484 struct cdp_pdev *pdev,
Pratik Gandhi8b8334b2017-03-09 17:41:40 +0530485 uint32_t val);
486 int
487 (*txrx_update_filter_neighbour_peers)(
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +0530488 struct cdp_vdev *vdev,
Pratik Gandhi8b8334b2017-03-09 17:41:40 +0530489 uint32_t cmd, uint8_t *macaddr);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530490 /**
491 * @brief set the safemode of the device
492 * @details
493 * This flag is used to bypass the encrypt and decrypt processes when
494 * send and receive packets. It works like open AUTH mode, HW will
495 * ctreate all packets as non-encrypt frames because no key installed.
496 * For rx fragmented frames,it bypasses all the rx defragmentaion.
497 *
498 * @param vdev - the data virtual device object
499 * @param val - the safemode state
500 * @return - void
501 */
502
503 void
504 (*txrx_set_safemode)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800505 struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530506 u_int32_t val);
507 /**
508 * @brief configure the drop unencrypted frame flag
509 * @details
510 * Rx related. When set this flag, all the unencrypted frames
511 * received over a secure connection will be discarded
512 *
513 * @param vdev - the data virtual device object
514 * @param val - flag
515 * @return - void
516 */
517 void
518 (*txrx_set_drop_unenc)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800519 struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530520 u_int32_t val);
521
522
523 /**
524 * @brief set the Tx encapsulation type of the VDEV
525 * @details
526 * This will be used to populate the HTT desc packet type field
527 * during Tx
528 * @param vdev - the data virtual device object
529 * @param val - the Tx encap type
530 * @return - void
531 */
532 void
533 (*txrx_set_tx_encap_type)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800534 struct cdp_vdev *vdev,
Nandha Kishore Easwaran870abda2016-11-16 17:37:19 +0530535 enum htt_cmn_pkt_type val);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530536 /**
537 * @brief set the Rx decapsulation type of the VDEV
538 * @details
539 * This will be used to configure into firmware and hardware
540 * which format to decap all Rx packets into, for all peers under
541 * the VDEV.
542 * @param vdev - the data virtual device object
543 * @param val - the Rx decap mode
544 * @return - void
545 */
546 void
547 (*txrx_set_vdev_rx_decap_type)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800548 struct cdp_vdev *vdev,
Nandha Kishore Easwaran870abda2016-11-16 17:37:19 +0530549 enum htt_cmn_pkt_type val);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530550
551 /**
552 * @brief get the Rx decapsulation type of the VDEV
553 *
554 * @param vdev - the data virtual device object
555 * @return - the Rx decap type
556 */
Nandha Kishore Easwaranfb0a7e52017-02-03 21:18:49 +0530557 enum htt_cmn_pkt_type
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800558 (*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530559
560 /* Is this similar to ol_txrx_peer_state_update() in MCL */
561 /**
562 * @brief Update the authorize peer object at association time
563 * @details
564 * For the host-based implementation of rate-control, it
565 * updates the peer/node-related parameters within rate-control
566 * context of the peer at association.
567 *
568 * @param peer - pointer to the node's object
569 * @authorize - either to authorize or unauthorize peer
570 *
571 * @return none
572 */
573 void
c_cgodavbd5b3c22017-06-07 12:31:40 +0530574 (*txrx_peer_authorize)(struct cdp_peer *peer,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530575 u_int32_t authorize);
576
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530577 /* Should be ol_txrx_ctrl_api.h */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800578 void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530579
Venkateswara Swamy Bandaruec4f8e62017-03-07 11:04:28 +0530580 /**
581 * @brief setting mesh rx filter
582 * @details
583 * based on the bits enabled in the filter packets has to be dropped.
584 *
585 * @param vdev - the data virtual device object
586 * @param val - value to set
587 */
588 void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);
589
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800590 void (*tx_flush_buffers)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530591
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800592 int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530593
Ishank Jain9f174c62017-03-30 18:37:42 +0530594 void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
595 enum cdp_vdev_param_type param, uint32_t val);
596
c_cgodavbd5b3c22017-06-07 12:31:40 +0530597 void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +0530598 /**
599 * @brief Set the reo dest ring num of the radio
600 * @details
601 * Set the reo destination ring no on which we will receive
602 * pkts for this radio.
603 *
604 * @param pdev - the data physical device object
605 * @param reo_dest_ring_num - value ranges between 1 - 4
606 */
607 void (*txrx_set_pdev_reo_dest)(
608 struct cdp_pdev *pdev,
609 enum cdp_host_reo_dest_ring reo_dest_ring_num);
610
611 /**
612 * @brief Get the reo dest ring num of the radio
613 * @details
614 * Get the reo destination ring no on which we will receive
615 * pkts for this radio.
616 *
617 * @param pdev - the data physical device object
618 * @return the reo destination ring number
619 */
620 enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
621 struct cdp_pdev *pdev);
Nandha Kishore Easwaran26689942017-04-17 16:52:46 +0530622
623 int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
624 uint32_t event);
625
626 int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
627 uint32_t event);
c_cgodavbd5b3c22017-06-07 12:31:40 +0530628 int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);
Kiran Venkatappae2f43352017-08-23 22:14:44 +0530629
630 void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
631 uint8_t subtype, uint8_t tx_power);
Soumya Bhatcfbb8952017-10-03 15:04:09 +0530632
Chaithanya Garrepalli7ab76ae2018-07-05 14:53:50 +0530633 /**
634 * txrx_set_pdev_param() - callback to set pdev parameter
635 * @soc: opaque soc handle
636 * @pdev: data path pdev handle
637 * @val: value of pdev_tx_capture
638 *
639 * Return: status: 0 - Success, non-zero: Failure
640 */
641 QDF_STATUS (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
642 enum cdp_pdev_param_type type,
Kai Chen99efa0d2019-08-20 17:51:27 -0700643 uint32_t val);
Venkata Sharath Chandra Manchala09adf532017-11-03 14:44:35 -0700644 void * (*txrx_get_pldev)(struct cdp_pdev *pdev);
Soumya Bhatbc719e62018-02-18 18:21:25 +0530645
646#ifdef ATH_SUPPORT_NAC_RSSI
647 QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
648 enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
649 uint8_t chan_num);
Chaithanya Garrepalli95fc62f2018-07-24 18:52:27 +0530650 QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev,
651 char *macaddr,
652 uint8_t *rssi);
Soumya Bhatbc719e62018-02-18 18:21:25 +0530653#endif
Pramod Simha6e10cb22018-06-20 12:05:44 -0700654 void (*set_key)(struct cdp_peer *peer_handle,
655 bool is_unicast, uint32_t *key);
phadiman4213e9c2018-10-29 12:50:02 +0530656
657 uint32_t (*txrx_get_vdev_param)(struct cdp_vdev *vdev,
658 enum cdp_vdev_param_type param);
Keyur Parekhc28f8392018-11-21 02:50:56 -0800659 int (*enable_peer_based_pktlog)(struct cdp_pdev
660 *txrx_pdev_handle, char *macaddr, uint8_t enb_dsb);
661
Varsha Mishraa331e6e2019-03-11 12:16:14 +0530662 void (*calculate_delay_stats)(struct cdp_vdev *vdev, qdf_nbuf_t nbuf);
Karunakar Dasineni142f9ba2019-03-19 23:04:59 -0700663#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
664 QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
665 struct cdp_pdev *txrx_pdev_handle,
666 uint32_t protocol_mask, uint16_t protocol_type,
667 uint16_t tag);
668#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
669 void (*txrx_dump_pdev_rx_protocol_tag_stats)(
670 struct cdp_pdev *txrx_pdev_handle,
671 uint16_t protocol_type);
672#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
673#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
Sumeet Raoc4fa4df2019-07-05 02:11:19 -0700674#ifdef WLAN_SUPPORT_RX_FLOW_TAG
675 QDF_STATUS (*txrx_set_rx_flow_tag)(
676 struct cdp_pdev *txrx_pdev_handle,
677 struct cdp_rx_flow_info *flow_info);
678 QDF_STATUS (*txrx_dump_rx_flow_tag_stats)(
679 struct cdp_pdev *txrx_pdev_handle,
680 struct cdp_rx_flow_info *flow_info);
681#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
Varsha Mishra6e1760c2019-07-27 22:51:42 +0530682#ifdef QCA_MULTIPASS_SUPPORT
683 void (*txrx_peer_set_vlan_id)(ol_txrx_soc_handle soc,
684 struct cdp_vdev *vdev, uint8_t *peer_mac,
685 uint16_t vlan_id);
686#endif
Sumeet Rao511db292019-07-22 11:42:48 -0700687#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
688 QDF_STATUS (*txrx_update_peer_pkt_capture_params)(
689 struct cdp_pdev *txrx_pdev_handle,
690 bool is_rx_pkt_cap_enable, bool is_tx_pkt_cap_enable,
691 uint8_t *peer_mac);
692#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530693};
694
/**
 * struct cdp_me_ops - data path mcast enhancement (me) ops
 *
 * Function pointers, filled in by the data path, that manage the tx
 * descriptor pool reserved for multicast-to-unicast ("mcast clone")
 * conversion and perform the conversion itself.
 */
struct cdp_me_ops {

	/* Reserve @buf_count tx descriptors on @pdev for mcast clone;
	 * returns a count (presumably the number actually reserved —
	 * TODO confirm against the data path implementation).
	 */
	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);

	/* Release @buf_count previously reserved mcast-clone descriptors. */
	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
			struct cdp_pdev *pdev,
			u_int16_t buf_count);

	/* Query how many mcast-clone descriptors are currently reserved. */
	u_int16_t
	(*tx_get_mcast_buf_allocated_marked)
			(struct cdp_pdev *pdev);

	/* Allocate the per-pdev mcast-enhancement descriptor pool. */
	void
	(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);

	/* Free the per-pdev mcast-enhancement descriptor pool. */
	void
	(*tx_me_free_descriptor)(struct cdp_pdev *pdev);

	/* Convert multicast frame @wbuf into unicast copies addressed to
	 * the @newmaccnt destinations in @newmac[]; returns a count
	 * (presumably frames generated — TODO confirm).
	 */
	uint16_t
	(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
			qdf_nbuf_t wbuf, u_int8_t newmac[][6],
			uint8_t newmaccnt);

	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify mcast frame indication from FW.
	 * @details
	 *  This notification will be used to convert
	 *  multicast frame to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspect
	 */
	int (*mcast_notify)(struct cdp_pdev *pdev,
			u_int8_t vdev_id, qdf_nbuf_t msdu);
};
732
/**
 * struct cdp_mon_ops - monitor mode ops
 *
 * Function pointers for configuring and querying the monitor-mode
 * rx filter (unicast data / multicast data / non-data frames),
 * resetting monitor mode, and delivering tx mgmt frames.
 */
struct cdp_mon_ops {

	/* Set monitor filter for unicast data frames (non-zero @val
	 * presumably enables filtering — TODO confirm).
	 */
	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	/* Set monitor filter for multicast data frames. */
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	/* Set monitor filter for non-data (mgmt/ctrl) frames. */
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);

	/* Getters for the three filters above; note these take a vdev
	 * handle while the setters take a pdev handle.
	 */
	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);

	/* Disable/reset monitor mode on @pdev. */
	QDF_STATUS (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);

	/* HK advance monitor filter support */
	QDF_STATUS (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);

	/* Record the current operating channel (@val) for monitor status. */
	void (*txrx_monitor_record_channel)
		(struct cdp_pdev *, int val);

	/* Deliver a tx management frame @nbuf to the monitor interface. */
	void (*txrx_deliver_tx_mgmt)
		(struct cdp_pdev *pdev, qdf_nbuf_t nbuf);
};
760
#ifdef WLAN_FEATURE_PKT_CAPTURE
/**
 * struct cdp_pktcapture_ops - packet capture feature ops
 *
 * Function pointers to set/get the packet capture mode, register a
 * capture callback, process captured mgmt packets and record the
 * operating channel. All take (soc, pdev_id) rather than opaque
 * pdev handles.
 */
struct cdp_pktcapture_ops {
	/* Set packet capture @mode for the pdev identified by @pdev_id. */
	void (*txrx_pktcapture_set_mode)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		uint8_t mode);

	/* Return the current packet capture mode. */
	uint8_t (*txrx_pktcapture_get_mode)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id);

	/* Register capture callback @cb; @context is passed back as the
	 * callback's first argument (presumably — TODO confirm).
	 */
	QDF_STATUS (*txrx_pktcapture_cb_register)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		void *context,
		QDF_STATUS(cb)(void *, qdf_nbuf_t));

	/* Deregister the previously registered capture callback. */
	QDF_STATUS (*txrx_pktcapture_cb_deregister)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id);

	/* Process a captured mgmt packet @nbuf with rx status
	 * @txrx_status and fw @status.
	 */
	QDF_STATUS (*txrx_pktcapture_mgmtpkt_process)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		struct mon_rx_status *txrx_status,
		qdf_nbuf_t nbuf, uint8_t status);

	/* Record the operating channel number for capture metadata. */
	void (*txrx_pktcapture_record_channel)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		int chan_no);
};
#endif /* #ifdef WLAN_FEATURE_PKT_CAPTURE */
794
/**
 * struct cdp_host_stats_ops - host statistics ops
 *
 * Function pointers for fetching, publishing, updating and clearing
 * data path statistics at soc/pdev/vdev/peer granularity. Objects are
 * identified by id (pdev_id/vdev_id) or mac address rather than by
 * opaque handles.
 */
struct cdp_host_stats_ops {
	/* Fetch host stats for @vdev_id as described by @req. */
	int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
				   struct ol_txrx_stats_req *req);

	/* Clear host stats for @vdev_id. */
	QDF_STATUS (*txrx_host_stats_clr)(struct cdp_soc_t *soc,
					  uint8_t vdev_id);

	QDF_STATUS
	(*txrx_host_ce_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);

	/* Copy pdev stats into caller-provided @buf. */
	int (*txrx_stats_publish)(struct cdp_soc_t *soc, uint8_t pdev_id,
				  struct cdp_stats_extd *buf);

	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param soc - the soc handle
	 * @param pdev_id - pdev_id of pdev
	 * @return - QDF_STATUS
	 */
	QDF_STATUS (*txrx_enable_enhanced_stats)(struct cdp_soc_t *soc,
						 uint8_t pdev_id);

	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param soc - the soc handle
	 * @param pdev_id - pdev_id of pdev
	 * @return - QDF_STATUS
	 */
	QDF_STATUS (*txrx_disable_enhanced_stats)(struct cdp_soc_t *soc,
						  uint8_t pdev_id);

	/* TSO / SG / rx checksum counters: print to log and reset. */
	QDF_STATUS
	(*tx_print_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);

	QDF_STATUS
	(*tx_rst_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);

	QDF_STATUS
	(*tx_print_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);

	QDF_STATUS
	(*tx_rst_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);

	QDF_STATUS
	(*print_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);

	QDF_STATUS
	(*rst_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);

	/* Mcast-enhancement stats for @vdev_id. */
	QDF_STATUS
	(*txrx_host_me_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);

	/* Per-peer stats keyed by peer mac @addr. */
	QDF_STATUS
	(*txrx_per_peer_stats)(struct cdp_soc_t *soc, uint8_t *addr);

	int (*txrx_host_msdu_ttl_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
					struct ol_txrx_stats_req *req);

	/* Update peer stats from an opaque @stats blob; @stats_id selects
	 * the stats type (presumably a wmi stats id — TODO confirm).
	 */
	int (*ol_txrx_update_peer_stats)(struct cdp_soc_t *soc,
					 uint8_t pdev_id,
					 uint8_t *addr, void *stats,
					 uint32_t last_tx_rate_mcs,
					 uint32_t stats_id);

	/* Request FW peer stats for @addr; @cap selects the request type,
	 * @copy_stats asks for a host copy (assumed — TODO confirm).
	 */
	QDF_STATUS
	(*get_fw_peer_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
			     uint8_t *addr,
			     uint32_t cap, uint32_t copy_stats);

	/* Issue an HTT stats request described by @data/@data_len. */
	QDF_STATUS
	(*get_htt_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
			 void *data,
			 uint32_t data_len);

	QDF_STATUS
	(*txrx_update_pdev_stats)(struct cdp_soc_t *soc,
				  uint8_t pdev_id, void *data,
				  uint16_t stats_id);

	/* Copy peer stats for (@vdev_id, @peer_mac) into @peer_stats. */
	QDF_STATUS
	(*txrx_get_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
			       uint8_t *peer_mac,
			       struct cdp_peer_stats *peer_stats);

	QDF_STATUS
	(*txrx_reset_peer_ald_stats)(struct cdp_soc_t *soc,
				     uint8_t vdev_id,
				     uint8_t *peer_mac);

	QDF_STATUS
	(*txrx_reset_peer_stats)(struct cdp_soc_t *soc,
				 uint8_t vdev_id, uint8_t *peer_mac);

	/* Copy vdev stats into @buf; when @is_aggregate, include stats
	 * aggregated from the vdev's peers (assumed — TODO confirm).
	 */
	int
	(*txrx_get_vdev_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
			       void *buf, bool is_aggregate);

	/* Parse a WMI host vdev stats event buffer of length @len. */
	int
	(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
					    void *data, uint32_t len,
					    uint32_t stats_id);

	int
	(*txrx_get_vdev_extd_stats)(struct cdp_soc_t *soc,
				    uint8_t vdev_id,
				    wmi_host_vdev_extd_stats *buffer);

	QDF_STATUS
	(*txrx_update_vdev_stats)(struct cdp_soc_t *soc,
				  uint8_t vdev_id, void *buf,
				  uint16_t stats_id);

	/* Radio (pdev) level stats into opaque @buf. */
	int
	(*txrx_get_radio_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
				void *buf);

	QDF_STATUS
	(*txrx_get_pdev_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
			       struct cdp_pdev_stats *buf);

	/* Convert (preamble, mcs, ht flags, guard interval) to kbps. */
	int
	(*txrx_get_ratekbps)(int preamb, int mcs,
			     int htflag, int gintval);

	QDF_STATUS
	(*configure_rate_stats)(struct cdp_soc_t *soc, uint8_t val);

	QDF_STATUS
	(*txrx_update_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
				  uint8_t *peer_mac, void *stats,
				  uint32_t last_tx_rate_mcs,
				  uint32_t stats_id);
};
918
/**
 * struct cdp_wds_ops - WDS (wireless distribution system) ops
 *
 * Function pointers for configuring WDS rx/tx policy per vdev/peer
 * and enabling/disabling WDS on a vdev.
 */
struct cdp_wds_ops {
	/* Set the WDS rx policy value @val for @vdev_id. */
	QDF_STATUS
	(*txrx_set_wds_rx_policy)(struct cdp_soc_t *soc, uint8_t vdev_id,
				  u_int32_t val);
	/* Update the peer's WDS tx policy for unicast and multicast. */
	QDF_STATUS
	(*txrx_wds_peer_tx_policy_update)(struct cdp_soc_t *soc,
					  uint8_t vdev_id, uint8_t *peer_mac,
					  int wds_tx_ucast, int wds_tx_mcast);
	/* Enable (non-zero @val) or disable WDS on @vdev_id. */
	int (*vdev_set_wds)(struct cdp_soc_t *soc, uint8_t vdev_id,
			    uint32_t val);
};
930
/**
 * struct cdp_raw_ops - raw (802.11 native wifi) frame mode ops
 */
struct cdp_raw_ops {
	/* Return the native-wifi header mode configured on @vdev_id. */
	int (*txrx_get_nwifi_mode)(struct cdp_soc_t *soc, uint8_t vdev_id);

	/* Look up the AST entry for frame @pnbuf (raw-mode simulation);
	 * result is written into @raw_ast.
	 */
	QDF_STATUS
	(*rsim_get_astentry)(struct cdp_soc_t *soc, uint8_t vdev_id,
			     qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast);
};
938
#ifdef PEER_FLOW_CONTROL
/**
 * struct cdp_pflow_ops - peer flow control ops
 */
struct cdp_pflow_ops {
	/* Update a pdev flow-control parameter selected by the
	 * _ol_ath_param_t enum; trailing uint32_t/void* carry the value
	 * and an opaque buffer (parameter names omitted in prototype).
	 */
	uint32_t (*pflow_update_pdev_params)(struct cdp_soc_t *soc,
					     uint8_t pdev_id,
					     enum _ol_ath_param_t,
					     uint32_t, void *);
};
#endif /* PEER_FLOW_CONTROL */
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530947
/* Toeplitz hash seed sizes (in 32-bit words) for IPv4/IPv6 flows. */
#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	/* 9-bit fields: TCP header flags are 9 bits wide. */
	uint32_t tcp_flag:9,
		tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
971
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530972struct ol_if_ops {
Akshay Kosigi0e7fdae2018-05-17 12:16:57 +0530973 void
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +0530974 (*peer_set_default_routing)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
975 uint8_t pdev_id, uint8_t *peer_macaddr,
976 uint8_t vdev_id,
Akshay Kosigi0e7fdae2018-05-17 12:16:57 +0530977 bool hash_based, uint8_t ring_num);
978 QDF_STATUS
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +0530979 (*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
980 uint8_t pdev_id,
Akshay Kosigi0e7fdae2018-05-17 12:16:57 +0530981 uint8_t vdev_id, uint8_t *peer_mac,
982 qdf_dma_addr_t hw_qdesc, int tid,
Gyanranjan Hazarika7f9c0502018-07-25 23:26:16 -0700983 uint16_t queue_num,
984 uint8_t ba_window_size_valid,
985 uint16_t ba_window_size);
Akshay Kosigi0e7fdae2018-05-17 12:16:57 +0530986 QDF_STATUS
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +0530987 (*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
988 uint8_t pdev_id,
Akshay Kosigi0e7fdae2018-05-17 12:16:57 +0530989 uint8_t vdev_id, uint8_t *peer_macaddr,
990 uint32_t tid_mask);
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +0530991 int (*peer_unref_delete)(struct cdp_ctrl_objmgr_psoc *psoc,
992 uint8_t pdev_id,
993 uint8_t *peer_mac,
Pavankumar Nandeshwar715fdc32019-10-03 20:51:01 +0530994 uint8_t *vdev_mac, enum wlan_op_mode opmode);
Pramod Simha7f7b4aa2017-03-27 14:48:09 -0700995 bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +0530996 int (*peer_add_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
997 uint8_t vdev_id,
998 uint8_t *peer_macaddr,
syed touqeer pasha0050ec92018-10-14 19:36:15 +0530999 const uint8_t *dest_macaddr,
1000 uint8_t *next_node_mac,
1001 uint32_t flags);
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301002 int (*peer_update_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
1003 uint8_t vdev_id,
1004 uint8_t *dest_macaddr,
1005 uint8_t *peer_macaddr,
1006 uint32_t flags);
1007 void (*peer_del_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
1008 uint8_t vdev_id,
Chaithanya Garrepalli267ae0e2019-02-19 23:45:12 +05301009 uint8_t *wds_macaddr,
1010 uint8_t type);
Akshay Kosigi0e7fdae2018-05-17 12:16:57 +05301011 QDF_STATUS
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301012 (*lro_hash_config)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id,
Akshay Kosigi0e7fdae2018-05-17 12:16:57 +05301013 struct cdp_lro_hash_config *rx_offld_hash);
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301014
Ishank Jain1e7401c2017-02-17 15:38:39 +05301015 void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
1016 uint8_t type);
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301017#ifdef FEATURE_NAC_RSSI
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301018 uint8_t (*rx_invalid_peer)(struct cdp_ctrl_objmgr_psoc *soc,
1019 uint8_t pdev_id, void *msg);
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301020#else
1021 uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
Jinwei Chen46733102018-08-20 15:42:08 +08001022#endif
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301023
1024 int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *psoc,
Akshay Kosigieec6db92019-07-02 14:25:54 +05301025 uint16_t peer_id, uint16_t hw_peer_id,
1026 uint8_t vdev_id, uint8_t *peer_mac_addr,
1027 enum cdp_txrx_ast_entry_type peer_type,
1028 uint32_t tx_ast_hashidx);
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301029 int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *psoc,
Akshay Kosigieec6db92019-07-02 14:25:54 +05301030 uint16_t peer_id,
Subhranil Choudhury9bcfecf2019-02-28 13:41:45 +05301031 uint8_t vdev_id);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301032
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301033 int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *psoc,
Akshay Kosigieec6db92019-07-02 14:25:54 +05301034 enum cdp_cfg_param_type param_num);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +05301035
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301036 void (*rx_mic_error)(struct cdp_ctrl_objmgr_psoc *psoc,
1037 uint8_t pdev_id,
Rakshith Suresh Patkard863f8d2019-07-16 16:30:59 +05301038 struct cdp_rx_mic_err_info *info);
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301039
1040 bool (*rx_frag_tkip_demic)(struct cdp_ctrl_objmgr_psoc *psoc,
1041 uint8_t vdev_id, uint8_t *peer_mac_addr,
Akshay Kosigi78eced82018-05-14 14:53:48 +05301042 qdf_nbuf_t nbuf,
Pramod Simha6e10cb22018-06-20 12:05:44 -07001043 uint16_t hdr_space);
Gurumoorthi Gnanasambandhan25607a72017-08-07 11:53:16 +05301044
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301045 uint8_t (*freq_to_channel)(struct cdp_ctrl_objmgr_psoc *psoc,
1046 uint8_t vdev_id, uint16_t freq);
1047
Soumya Bhatbc719e62018-02-18 18:21:25 +05301048#ifdef ATH_SUPPORT_NAC_RSSI
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301049 int (*config_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
1050 uint8_t pdev_id,
1051 u_int8_t vdev_id,
1052 enum cdp_nac_param_cmd cmd, char *bssid,
1053 char *client_macaddr, uint8_t chan_num);
1054
1055 int
1056 (*config_bssid_in_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
1057 uint8_t pdev_id, u_int8_t vdev_id,
1058 enum cdp_nac_param_cmd cmd,
1059 char *bssid, char *client_mac);
Soumya Bhatbc719e62018-02-18 18:21:25 +05301060#endif
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301061 int (*peer_sta_kickout)(struct cdp_ctrl_objmgr_psoc *psoc,
1062 uint16_t pdev_id, uint8_t *peer_macaddr);
Pamidipati, Vijayd578db12018-04-09 23:03:12 +05301063
sumedh baikadydf4a57c2018-04-08 22:19:22 -07001064 /**
1065 * send_delba() - Send delba to peer
Pavankumar Nandeshwar715fdc32019-10-03 20:51:01 +05301066 * @psoc: Objmgr soc handle
1067 * @vdev_id: dp vdev id
sumedh baikadydf4a57c2018-04-08 22:19:22 -07001068 * @peer_macaddr: Peer mac addr
1069 * @tid: Tid number
1070 *
1071 * Return: 0 for success, non-zero for failure
1072 */
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301073 int (*send_delba)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t vdev_id,
Pavankumar Nandeshwar715fdc32019-10-03 20:51:01 +05301074 uint8_t *peer_macaddr, uint8_t tid,
sumedh baikadyfaadbb62018-08-21 21:13:42 -07001075 uint8_t reason_code);
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05301076
1077 int
1078 (*peer_delete_multiple_wds_entries)(struct cdp_ctrl_objmgr_psoc *psoc,
1079 uint8_t vdev_id,
1080 uint8_t *dest_macaddr,
1081 uint8_t *peer_macaddr,
1082 uint32_t flags);
Manjunathappa Prakash85de96c2019-05-23 17:35:12 -07001083
1084 bool (*is_roam_inprogress)(uint32_t vdev_id);
Jinwei Chen0f015f22019-07-18 19:47:59 +08001085 enum QDF_GLOBAL_MODE (*get_con_mode)(void);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +05301086 /* TODO: Add any other control path calls required to OL_IF/WMA layer */
1087};
1088
#ifdef DP_PEER_EXTENDED_API
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer: Update ibss vdev heart beat timer
 * @set_wmm_param: set wmm parameters
 * @bad_peer_txctl_set_setting: configure bad peer tx limit setting
 * @bad_peer_txctl_update_threshold: configure bad peer tx threshold limit
 * @hl_tdls_flag_reset: reset tdls flag for vdev
 * @tx_non_std: Allow the control-path SW to send data frames
 * @get_vdev_id: get vdev id
 * @get_tx_ack_stats: get tx ack count for vdev
 * @set_wisa_mode: set wisa mode for a vdev
 * @txrx_data_stall_cb_register: register data stall callback
 * @txrx_data_stall_cb_deregister: deregister data stall callback
 * @txrx_post_data_stall_event: post data stall event
 * @runtime_suspend: ensure TXRX is ready to runtime suspend
 * @runtime_resume: ensure TXRX is ready to runtime resume
 * @get_opmode: get operation mode of vdev
 * @mark_first_wakeup_packet: set flag to indicate that fw is compatible for
 *			      marking first packet after wow wakeup
 * @update_mac_id: update mac_id for vdev
 * @flush_rx_frames: flush rx frames on the queue
 * @get_intra_bss_fwd_pkts_count: to get the total tx and rx packets that
 *				  has been forwarded from txrx layer
 *				  without going to upper layers
 * @pkt_log_init: handler to initialize packet log
 * @pkt_log_con_service: handler to connect packet log service
 * @get_num_rx_contexts: handler to get number of RX contexts
 * @register_pktdump_cb: register callback for different pktlog
 * @unregister_pktdump_cb: unregister callback for different pktlog
 * @pdev_reset_driver_del_ack: reset driver delayed ack enabled flag
 * @vdev_set_driver_del_ack_enable: set driver delayed ack enabled flag
 * @vdev_set_bundle_require_flag: set bundling required flag for vdev
 * @pdev_reset_bundle_require_flag: reset bundling required flag for pdev
 *
 * Function pointers for miscellaneous soc/pdev/vdev related operations.
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_soc_t *soc_hdl,
						   uint8_t vdev_id,
						   uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			      struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_soc_t *soc_hdl,
					   uint8_t pdev_id, int enable,
					   int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_soc_t *soc_hdl,
						uint8_t pdev_id,
						int level, int tput_thresh,
						int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_soc_t *soc_hdl,
				   uint8_t vdev_id, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(struct cdp_soc_t *soc_hdl,
				     uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_soc_t *soc_hdl,
				    uint8_t vdev_id, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(struct cdp_soc_t *soc_hdl,
						  uint8_t pdev_id,
						  data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(struct cdp_soc_t *soc_hdl,
						    uint8_t pdev_id,
						    data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
			struct cdp_soc_t *soc_hdl,
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_soc_t *soc_hdl,
				      uint8_t pdev_id);
	QDF_STATUS (*runtime_resume)(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id);
	int (*get_opmode)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
	void (*mark_first_wakeup_packet)(struct cdp_soc_t *soc_hdl,
					 uint8_t pdev_id, uint8_t value);
	void (*update_mac_id)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      uint8_t mac_id);
	void (*flush_rx_frames)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				void *peer, bool drop);
	A_STATUS(*get_intra_bss_fwd_pkts_count)(struct cdp_soc_t *soc_hdl,
						uint8_t vdev_id,
						uint64_t *fwd_tx_packets,
						uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_soc_t *soc_hdl, uint8_t pdev,
			     void *scn);
	void (*pkt_log_con_service)(struct cdp_soc_t *soc_hdl,
				    uint8_t pdev_id, void *scn);
	int (*get_num_rx_contexts)(struct cdp_soc_t *soc_hdl);
	void (*register_pktdump_cb)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				    ol_txrx_pktdump_cb tx_cb,
				    ol_txrx_pktdump_cb rx_cb);
	void (*unregister_pktdump_cb)(struct cdp_soc_t *soc_hdl,
				      uint8_t pdev_id);
	void (*pdev_reset_driver_del_ack)(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id);
	void (*vdev_set_driver_del_ack_enable)(struct cdp_soc_t *soc_hdl,
					       uint8_t vdev_id,
					       unsigned long rx_packets,
					       uint32_t time_in_ms,
					       uint32_t high_th,
					       uint32_t low_th);
	void (*vdev_set_bundle_require_flag)(uint8_t vdev_id,
					     unsigned long tx_bytes,
					     uint32_t time_in_ms,
					     uint32_t high_th,
					     uint32_t low_th);
	void (*pdev_reset_bundle_require_flag)(struct cdp_soc_t *soc_hdl,
					       uint8_t pdev_id);
};
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +05301198
Leo Changdb6358c2016-09-27 17:00:52 -07001199/**
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301200 * struct cdp_ocb_ops - mcl ocb ops
Rakesh Pillai5396b882019-07-07 00:36:41 +05301201 * @set_ocb_chan_info: set OCB channel info
1202 * @get_ocb_chan_info: get OCB channel info
1203 *
1204 * Function pointers for operations related to OCB.
Leo Changdb6358c2016-09-27 17:00:52 -07001205 */
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301206struct cdp_ocb_ops {
Rakesh Pillai5396b882019-07-07 00:36:41 +05301207 void (*set_ocb_chan_info)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1208 struct ol_txrx_ocb_set_chan ocb_set_chan);
1209 struct ol_txrx_ocb_chan_info *(*get_ocb_chan_info)(
1210 struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301211};
1212
/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer: register a peer described by @sta_desc
 * @clear_peer: clear peer state for @peer_addr
 * @change_peer_state: move a peer to a new ol_txrx_peer_state
 * @peer_get_ref_by_addr: look up a peer and take a debug-tracked reference
 * @peer_release_ref: release a reference taken via @peer_get_ref_by_addr
 * @find_peer_by_addr: look up a peer by mac (no reference taken)
 * @find_peer_by_addr_and_vdev: look up a peer by mac within a vdev
 * @peer_state_update: update peer state by mac address
 * @get_vdevid: return the vdev id a peer belongs to via @vdev_id
 * @get_vdev_by_peer_addr: return the vdev owning the peer with @peer_addr
 * @register_ocb_peer: register the OCB self peer by mac
 * @peer_get_peer_mac_addr: return a peer's mac address
 * @get_peer_state: return the peer's current state
 * @get_vdev_for_peer: return the vdev a peer is attached to
 * @update_ibss_add_peer_num_of_vdev: adjust ibss peer count by delta
 * @remove_peers_for_vdev: remove the vdev's peers, invoking @callback
 * @remove_peers_for_vdev_no_lock: as above, without holding the peer lock
 * @copy_mac_addr_raw: copy @bss_addr into the vdev (raw mode)
 * @add_last_real_peer: re-add the last real peer for the vdev
 * @is_vdev_restore_last_peer: whether the last peer should be restored
 * @update_last_real_peer: update the pdev's last real peer bookkeeping
 * @peer_detach_force_delete: force-delete a peer at detach
 * @set_tdls_offchan_enabled: mark TDLS off-channel as enabled for peer
 * @set_peer_as_tdls_peer: mark the peer as a TDLS peer
 */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
				    struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev,
				 struct qdf_mac_addr peer_addr);
	QDF_STATUS (*change_peer_state)(uint8_t sta_id,
					enum ol_txrx_peer_state sta_state,
					bool roam_synch_in_progress);
	void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
				       uint8_t *peer_addr,
				       enum peer_debug_id_type debug_id);
	void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
	void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
				    uint8_t *peer_addr);
	void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
					     struct cdp_vdev *vdev,
					     uint8_t *peer_addr);
	QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
					uint8_t *peer_addr,
					enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
	struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev,
					struct qdf_mac_addr peer_addr);
	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr);
	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(void *peer);
	struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
						    int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
				      ol_txrx_vdev_peer_remove_cb callback,
				      void *callback_context,
				      bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
					      ol_txrx_vdev_peer_remove_cb callback,
					      void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_pdev *pdev,
				   struct cdp_vdev *vdev);
	bool (*is_vdev_restore_last_peer)(void *peer);
	void (*update_last_real_peer)(struct cdp_pdev *pdev, void *vdev,
				      bool restore_last_peer);
	void (*peer_detach_force_delete)(void *peer);
	void (*set_tdls_offchan_enabled)(void *peer, bool val);
	void (*set_peer_as_tdls_peer)(void *peer, bool val);
};
1282
1283/**
Rakesh Pillai2b88f072019-07-09 14:37:28 +05301284 * struct cdp_mob_stats_ops - mcl mob stats ops
1285 * @clear_stats: handler to clear ol txrx stats
1286 * @stats: handler to update ol txrx stats
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301287 */
1288struct cdp_mob_stats_ops {
Rakesh Pillai2b88f072019-07-09 14:37:28 +05301289 QDF_STATUS(*clear_stats)(struct cdp_soc_t *soc_hdl,
1290 uint8_t pdev_id, uint8_t bitmap);
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301291 int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
Leo Changdb6358c2016-09-27 17:00:52 -07001292};
1293
1294/**
1295 * struct cdp_pmf_ops - mcl protected management frame ops
Vevek Venkatesandc1517e2019-09-16 23:52:28 +05301296 * @get_pn_info: handler to get pn info from peer
1297 *
1298 * Function pointers for pmf related operations.
Leo Changdb6358c2016-09-27 17:00:52 -07001299 */
1300struct cdp_pmf_ops {
Vevek Venkatesandc1517e2019-09-16 23:52:28 +05301301 void (*get_pn_info)(struct cdp_soc_t *soc, uint8_t *peer_mac,
1302 uint8_t vdev_id, uint8_t **last_pn_valid,
1303 uint64_t **last_pn, uint32_t **rmf_pn_replays);
Leo Changdb6358c2016-09-27 17:00:52 -07001304};
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301305#endif
Leo Changdb6358c2016-09-27 17:00:52 -07001306
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301307
1308#ifdef DP_FLOW_CTL
Leo Changdb6358c2016-09-27 17:00:52 -07001309/**
1310 * struct cdp_cfg_ops - mcl configuration ops
Jiani Liu7067cd42019-05-09 11:17:51 +08001311 * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
1312 * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
1313 * @cfg_attach: hardcode the configuration parameters
1314 * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
1315 * @is_rx_fwd_disabled: get the rx_fwd_disabled flag,
1316 * 1 enabled, 0 disabled.
1317 * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
1318 * indicate that mgmt over wmi is enabled
1319 * or not,
1320 * 1 for enabled, 0 for disable
1321 * @is_high_latency: get device is high or low latency device,
1322 * 1 high latency bus, 0 low latency bus
1323 * @set_flow_control_parameters: set flow control parameters
1324 * @set_flow_steering: set flow_steering_enabled flag
1325 * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
1326 * @set_new_htt_msg_format: set new_htt_msg_format flag
1327 * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
1328 * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
1329 * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag,
1330 * 1 enabled, 0 disabled.
1331 * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag,
1332 * 1 enabled, 0 disabled.
Leo Changdb6358c2016-09-27 17:00:52 -07001333 */
1334struct cdp_cfg_ops {
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001335 void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
1336 uint8_t disable_rx_fwd);
1337 void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
1338 uint8_t val);
1339 struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
1340 void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
1341 uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
Leo Changdb6358c2016-09-27 17:00:52 -07001342 void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001343 int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
1344 void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
1345 void *param);
1346 void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
Yu Wanga3f76c52017-08-10 16:58:13 +08001347 void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
jitiphile65cc2d2018-11-05 14:31:21 +05301348 void (*set_new_htt_msg_format)(uint8_t val);
Alok Kumar2e254c52018-11-28 17:26:53 +05301349 void (*set_peer_unmap_conf_support)(bool val);
1350 bool (*get_peer_unmap_conf_support)(void);
Jiani Liu7067cd42019-05-09 11:17:51 +08001351 void (*set_tx_compl_tsf64)(bool val);
1352 bool (*get_tx_compl_tsf64)(void);
Leo Changdb6358c2016-09-27 17:00:52 -07001353};
1354
1355/**
1356 * struct cdp_flowctl_ops - mcl flow control
Rakesh Pillaidce01372019-06-28 19:11:23 +05301357 * @flow_pool_map_handler: handler to map flow_id and pool descriptors
1358 * @flow_pool_unmap_handler: handler to unmap flow_id and pool descriptors
1359 * @register_pause_cb: handler to register tx pause callback
1360 * @set_desc_global_pool_size: handler to set global pool size
1361 * @dump_flow_pool_info: handler to dump global and flow pool info
1362 * @tx_desc_thresh_reached: handler to set tx desc threshold
1363 *
1364 * Function pointers for operations related to flow control
Leo Changdb6358c2016-09-27 17:00:52 -07001365 */
1366struct cdp_flowctl_ops {
Manjunathappa Prakash38205cc2018-03-06 14:22:44 -08001367 QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
Rakesh Pillaidce01372019-06-28 19:11:23 +05301368 uint8_t pdev_id,
Manjunathappa Prakash38205cc2018-03-06 14:22:44 -08001369 uint8_t vdev_id);
1370 void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
Rakesh Pillaidce01372019-06-28 19:11:23 +05301371 uint8_t pdev_id,
Manjunathappa Prakash38205cc2018-03-06 14:22:44 -08001372 uint8_t vdev_id);
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07001373 QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
1374 tx_pause_callback);
Leo Changdb6358c2016-09-27 17:00:52 -07001375 void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
Manjunathappa Prakash38205cc2018-03-06 14:22:44 -08001376
Rakesh Pillaidce01372019-06-28 19:11:23 +05301377 void (*dump_flow_pool_info)(struct cdp_soc_t *soc_hdl);
Sravan Kumar Kairamb75565e2018-12-17 17:55:44 +05301378
Rakesh Pillaidce01372019-06-28 19:11:23 +05301379 bool (*tx_desc_thresh_reached)(struct cdp_soc_t *soc_hdl,
1380 uint8_t vdev_id);
Leo Changdb6358c2016-09-27 17:00:52 -07001381};
1382
1383/**
1384 * struct cdp_lflowctl_ops - mcl legacy flow control ops
Rakesh Pillai20325542019-11-07 19:26:36 +05301385 * @register_tx_flow_control: Register tx flow control callback
1386 * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev
1387 * @set_vdev_os_queue_status: Set vdev queue status
1388 * @deregister_tx_flow_control_cb: Deregister tx flow control callback
1389 * @flow_control_cb: Call osif flow control callback
1390 * @get_tx_resource: Get tx resources and comapre with watermark
1391 * @ll_set_tx_pause_q_depth: set pause queue depth
1392 * @vdev_flush: Flush all packets on a particular vdev
1393 * @vdev_pause: Pause a particular vdev
1394 * @vdev_unpause: Unpause a particular vdev
1395 *
1396 * Function pointers for operations related to flow control
Leo Changdb6358c2016-09-27 17:00:52 -07001397 */
1398struct cdp_lflowctl_ops {
Ajit Pal Singhd1543e02018-04-19 15:02:22 +05301399#ifdef QCA_HL_NETDEV_FLOW_CONTROL
Rakesh Pillai20325542019-11-07 19:26:36 +05301400 int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl,
Rakesh Pillaid295d1e2019-09-11 08:00:36 +05301401 uint8_t pdev_id,
Ajit Pal Singhd1543e02018-04-19 15:02:22 +05301402 tx_pause_callback flowcontrol);
Rakesh Pillai20325542019-11-07 19:26:36 +05301403 int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl,
Yue Ma9c43a472019-11-12 12:51:02 -08001404 uint8_t vdev_id, uint32_t chan_freq);
Rakesh Pillai20325542019-11-07 19:26:36 +05301405 int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl,
1406 uint8_t vdev_id,
Ajit Pal Singh506c4d62018-04-25 16:59:19 +05301407 enum netif_action_type action);
Ajit Pal Singhd1543e02018-04-19 15:02:22 +05301408#else
Rakesh Pillai20325542019-11-07 19:26:36 +05301409 int (*register_tx_flow_control)(
1410 struct cdp_soc_t *soc_hdl,
1411 uint8_t vdev_id,
bings4dcaf8b2017-08-11 10:37:46 +08001412 ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
1413 ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
Ajit Pal Singhd1543e02018-04-19 15:02:22 +05301414#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
Rakesh Pillai20325542019-11-07 19:26:36 +05301415 int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl,
1416 uint8_t vdev_id);
1417 void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1418 bool tx_resume);
1419 bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1420 struct qdf_mac_addr peer_addr,
1421 unsigned int low_watermark,
1422 unsigned int high_watermark_offset);
1423 int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id,
1424 int pause_q_depth);
1425 void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
1426 void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1427 uint32_t reason, uint32_t pause_type);
1428 void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1429 uint32_t reason, uint32_t pause_type);
Leo Changdb6358c2016-09-27 17:00:52 -07001430};
1431
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301432/**
Rakesh Pillai20e302a2019-07-08 16:22:56 +05301433 * struct cdp_throttle_ops - mcl throttle ops
1434 * @throttle_init_period: handler to initialize tx throttle time
1435 * @throttle_set_level: handler to set tx throttle level
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301436 */
1437struct cdp_throttle_ops {
Rakesh Pillai20e302a2019-07-08 16:22:56 +05301438 void (*throttle_init_period)(struct cdp_soc_t *soc_hdl,
1439 uint8_t pdev_id, int period,
1440 uint8_t *dutycycle_level);
1441 void (*throttle_set_level)(struct cdp_soc_t *soc_hdl,
1442 uint8_t pdev_id, int level);
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301443};
1444#endif
1445
Yun Parkfde6b9e2017-06-26 17:13:11 -07001446#ifdef IPA_OFFLOAD
Leo Changdb6358c2016-09-27 17:00:52 -07001447/**
1448 * struct cdp_ipa_ops - mcl ipa data path ops
1449 * @ipa_get_resource:
1450 * @ipa_set_doorbell_paddr:
1451 * @ipa_set_active:
1452 * @ipa_op_response:
1453 * @ipa_register_op_cb:
1454 * @ipa_get_stat:
1455 * @ipa_tx_data_frame:
1456 */
1457struct cdp_ipa_ops {
Vevek Venkatesan2cc8c5d2019-08-22 16:29:46 +05301458 QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl,
1459 uint8_t pdev_id);
1460 QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_soc_t *soc_hdl,
1461 uint8_t pdev_id);
1462 QDF_STATUS (*ipa_set_active)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1463 bool uc_active, bool is_tx);
1464 QDF_STATUS (*ipa_op_response)(struct cdp_soc_t *soc_hdl,
1465 uint8_t pdev_id, uint8_t *op_msg);
1466 QDF_STATUS (*ipa_register_op_cb)(struct cdp_soc_t *soc_hdl,
1467 uint8_t pdev_id,
1468 void (*ipa_uc_op_cb_type)
1469 (uint8_t *op_msg, void *osif_ctxt),
1470 void *usr_ctxt);
1471 QDF_STATUS (*ipa_get_stat)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
1472 qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_soc_t *soc_hdl,
1473 uint8_t vdev_id, qdf_nbuf_t skb);
Yun Parkfde6b9e2017-06-26 17:13:11 -07001474 void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -08001475 uint32_t value);
Yun Parkfde6b9e2017-06-26 17:13:11 -07001476#ifdef FEATURE_METERING
Vevek Venkatesan2cc8c5d2019-08-22 16:29:46 +05301477 QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_soc_t *soc_hdl,
1478 uint8_t pdev_id,
1479 uint8_t reset_stats);
1480 QDF_STATUS (*ipa_uc_set_quota)(struct cdp_soc_t *soc_hdl,
1481 uint8_t pdev_id, uint64_t quota_bytes);
Yun Parkfde6b9e2017-06-26 17:13:11 -07001482#endif
Vevek Venkatesan2cc8c5d2019-08-22 16:29:46 +05301483 QDF_STATUS (*ipa_enable_autonomy)(struct cdp_soc_t *soc_hdl,
1484 uint8_t pdev_id);
1485 QDF_STATUS (*ipa_disable_autonomy)(struct cdp_soc_t *soc_hdl,
1486 uint8_t pdev_id);
Yun Park1ba3ada2018-01-11 11:38:41 -08001487#ifdef CONFIG_IPA_WDI_UNIFIED_API
Vevek Venkatesan2cc8c5d2019-08-22 16:29:46 +05301488 QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1489 void *ipa_i2w_cb, void *ipa_w2i_cb,
1490 void *ipa_wdi_meter_notifier_cb,
1491 uint32_t ipa_desc_size, void *ipa_priv,
1492 bool is_rm_enabled, uint32_t *tx_pipe_handle,
1493 uint32_t *rx_pipe_handle, bool is_smmu_enabled,
1494 qdf_ipa_sys_connect_params_t *sys_in,
1495 bool over_gsi);
Yun Park1ba3ada2018-01-11 11:38:41 -08001496#else /* CONFIG_IPA_WDI_UNIFIED_API */
Vevek Venkatesan2cc8c5d2019-08-22 16:29:46 +05301497 QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1498 void *ipa_i2w_cb, void *ipa_w2i_cb,
1499 void *ipa_wdi_meter_notifier_cb,
1500 uint32_t ipa_desc_size, void *ipa_priv,
1501 bool is_rm_enabled, uint32_t *tx_pipe_handle,
1502 uint32_t *rx_pipe_handle);
Yun Park1ba3ada2018-01-11 11:38:41 -08001503#endif /* CONFIG_IPA_WDI_UNIFIED_API */
Yun Parkfde6b9e2017-06-26 17:13:11 -07001504 QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
1505 uint32_t rx_pipe_handle);
1506 QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
Yun Parkfd269b52017-10-05 14:41:32 -07001507 qdf_ipa_client_type_t prod_client,
1508 qdf_ipa_client_type_t cons_client,
Yun Parkfde6b9e2017-06-26 17:13:11 -07001509 uint8_t session_id, bool is_ipv6_enabled);
1510 QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
Vevek Venkatesan2cc8c5d2019-08-22 16:29:46 +05301511 QDF_STATUS (*ipa_enable_pipes)(struct cdp_soc_t *soc_hdl,
1512 uint8_t pdev_id);
1513 QDF_STATUS (*ipa_disable_pipes)(struct cdp_soc_t *soc_hdl,
1514 uint8_t pdev_id);
Yun Parkfde6b9e2017-06-26 17:13:11 -07001515 QDF_STATUS (*ipa_set_perf_level)(int client,
1516 uint32_t max_supported_bw_mbps);
Vevek Venkatesan2cc8c5d2019-08-22 16:29:46 +05301517 bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1518 qdf_nbuf_t nbuf, bool *fwd_success);
Leo Changdb6358c2016-09-27 17:00:52 -07001519};
Yun Parkfde6b9e2017-06-26 17:13:11 -07001520#endif
Leo Changdb6358c2016-09-27 17:00:52 -07001521
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301522#ifdef DP_POWER_SAVE
1523/**
1524 * struct cdp_tx_delay_ops - mcl tx delay ops
Rakesh Pillaia0a2fe52019-07-04 20:11:58 +05301525 * @tx_delay: handler to get tx packet delay
1526 * @tx_delay_hist: handler to get tx packet delay histogram
1527 * @tx_packet_count: handler to get tx packet count
1528 * @tx_set_compute_interval: update compute interval period for TSM stats
1529 *
1530 * Function pointer for operations related to tx delay.
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301531 */
1532struct cdp_tx_delay_ops {
Rakesh Pillaia0a2fe52019-07-04 20:11:58 +05301533 void (*tx_delay)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1534 uint32_t *queue_delay_microsec,
1535 uint32_t *tx_delay_microsec, int category);
1536 void (*tx_delay_hist)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1537 uint16_t *bin_values, int category);
1538 void (*tx_packet_count)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1539 uint16_t *out_packet_count,
1540 uint16_t *out_packet_loss_count, int category);
1541 void (*tx_set_compute_interval)(struct cdp_soc_t *soc_hdl,
1542 uint8_t pdev_id, uint32_t interval);
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301543};
1544
Leo Changdb6358c2016-09-27 17:00:52 -07001545/**
Leo Changdb6358c2016-09-27 17:00:52 -07001546 * struct cdp_bus_ops - mcl bus suspend/resume ops
Rakesh Pillai1d4d12e2019-09-13 04:15:08 +05301547 * @bus_suspend: handler for bus suspend
1548 * @bus_resume: handler for bus resume
Leo Changdb6358c2016-09-27 17:00:52 -07001549 */
1550struct cdp_bus_ops {
Rakesh Pillai1d4d12e2019-09-13 04:15:08 +05301551 QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
1552 QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
Leo Changdb6358c2016-09-27 17:00:52 -07001553};
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301554#endif
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +05301555
Manjunathappa Prakash56023f52018-03-28 20:05:56 -07001556#ifdef RECEIVE_OFFLOAD
1557/**
Mohit Khanna16816ae2018-10-30 14:12:03 -07001558 * struct cdp_rx_offld_ops - mcl host receive offload ops
Manjunathappa Prakash56023f52018-03-28 20:05:56 -07001559 * @register_rx_offld_flush_cb:
1560 * @deregister_rx_offld_flush_cb:
1561 */
1562struct cdp_rx_offld_ops {
1563 void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
1564 void (*deregister_rx_offld_flush_cb)(void);
1565};
1566#endif
1567
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +05301568struct cdp_ops {
1569 struct cdp_cmn_ops *cmn_drv_ops;
1570 struct cdp_ctrl_ops *ctrl_ops;
1571 struct cdp_me_ops *me_ops;
1572 struct cdp_mon_ops *mon_ops;
1573 struct cdp_host_stats_ops *host_stats_ops;
1574 struct cdp_wds_ops *wds_ops;
1575 struct cdp_raw_ops *raw_ops;
1576 struct cdp_pflow_ops *pflow_ops;
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301577#ifdef DP_PEER_EXTENDED_API
Leo Changdb6358c2016-09-27 17:00:52 -07001578 struct cdp_misc_ops *misc_ops;
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301579 struct cdp_peer_ops *peer_ops;
1580 struct cdp_ocb_ops *ocb_ops;
1581 struct cdp_mob_stats_ops *mob_stats_ops;
1582 struct cdp_pmf_ops *pmf_ops;
1583#endif
1584#ifdef DP_FLOW_CTL
Leo Changdb6358c2016-09-27 17:00:52 -07001585 struct cdp_cfg_ops *cfg_ops;
1586 struct cdp_flowctl_ops *flowctl_ops;
1587 struct cdp_lflowctl_ops *l_flowctl_ops;
Vevek Venkatesande31ff62019-06-11 12:50:49 +05301588 struct cdp_throttle_ops *throttle_ops;
1589#endif
1590#ifdef DP_POWER_SAVE
1591 struct cdp_bus_ops *bus_ops;
1592 struct cdp_tx_delay_ops *delay_ops;
1593#endif
Yun Parkfde6b9e2017-06-26 17:13:11 -07001594#ifdef IPA_OFFLOAD
Leo Changdb6358c2016-09-27 17:00:52 -07001595 struct cdp_ipa_ops *ipa_ops;
Yun Parkfde6b9e2017-06-26 17:13:11 -07001596#endif
Manjunathappa Prakash56023f52018-03-28 20:05:56 -07001597#ifdef RECEIVE_OFFLOAD
1598 struct cdp_rx_offld_ops *rx_offld_ops;
1599#endif
Alok Kumar3d15ae82019-08-15 20:56:40 +05301600#ifdef WLAN_FEATURE_PKT_CAPTURE
1601 struct cdp_pktcapture_ops *pktcapture_ops;
1602#endif
1603
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +05301604};
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +05301605#endif