/*
 * Copyright (c) 2017 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_


#include <cdp_txrx_cmn_struct.h>
#ifdef CONFIG_WIN
#include <cdp_txrx_stats_struct.h>
#endif
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"

/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/

43struct cdp_cmn_ops {
44
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080045 int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053046
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080047 int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053048
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080049 struct cdp_vdev *(*txrx_vdev_attach)
50 (struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053051 uint8_t vdev_id, enum wlan_op_mode op_mode);
52
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080053 void (*txrx_vdev_detach)
54 (struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053055 void *cb_context);
56
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080057 struct cdp_pdev *(*txrx_pdev_attach)
58 (ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053059 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
60
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080061 int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);
Leo Changdb6358c2016-09-27 17:00:52 -070062
Himanshu Agarwalb7e3c982017-02-23 16:26:33 +053063 void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);
64
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080065 void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053066
Dhanashri Atre6d90ef32016-11-10 16:27:38 -080067 void *(*txrx_peer_create)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080068 (struct cdp_vdev *vdev, uint8_t *peer_mac_addr);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053069
Dhanashri Atre6d90ef32016-11-10 16:27:38 -080070 void (*txrx_peer_setup)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080071 (struct cdp_vdev *vdev_hdl, void *peer_hdl);
Dhanashri Atre6d90ef32016-11-10 16:27:38 -080072
73 void (*txrx_peer_teardown)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080074 (struct cdp_vdev *vdev_hdl, void *peer_hdl);
Dhanashri Atre6d90ef32016-11-10 16:27:38 -080075
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080076 void (*txrx_peer_delete)(void *peer);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053077
Pratik Gandhi8b8334b2017-03-09 17:41:40 +053078 int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
79 uint8_t smart_monitor);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053080
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080081 void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053082
83 void (*txrx_set_privacy_filters)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080084 (struct cdp_vdev *vdev, void *filter, uint32_t num);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053085
86 /********************************************************************
87 * Data Interface (B Interface)
88 ********************************************************************/
89
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080090 void (*txrx_vdev_register)(struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053091 void *osif_vdev, struct ol_txrx_ops *txrx_ops);
92
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080093 int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053094 qdf_nbuf_t tx_mgmt_frm, uint8_t type);
95
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -080096 int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +053097 qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps,
98 uint16_t chanfreq);
99
100 /**
101 * ol_txrx_mgmt_tx_cb - tx management delivery notification
102 * callback function
103 */
104
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800105 void (*txrx_mgmt_tx_cb_set)
106 (struct cdp_pdev *pdev, uint8_t type,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530107 ol_txrx_mgmt_tx_cb download_cb, ol_txrx_mgmt_tx_cb ota_ack_cb,
108 void *ctxt);
109
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800110 int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530111
112 /**
113 * ol_txrx_data_tx_cb - Function registered with the data path
114 * that is called when tx frames marked as "no free" are
115 * done being transmitted
116 */
117
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800118 void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530119 ol_txrx_data_tx_cb callback, void *ctxt);
120
121 /*******************************************************************
122 * Statistics and Debugging Interface (C Inteface)
123 ********************************************************************/
124
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800125 int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530126 int max_subfrms_amsdu);
127
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800128 A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
129 struct ol_txrx_stats_req *req,
Leo Changdb6358c2016-09-27 17:00:52 -0700130 bool per_vdev, bool response_expected);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530131
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800132 int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530133
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800134 void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530135 uint8_t cfg_stats_type, uint32_t cfg_val);
136
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800137 void (*txrx_print_level_set)(unsigned level);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530138
139 /**
140 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
141 * @vdev: vdev handle
142 *
143 * Return: vdev mac address
144 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800145 uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530146
147 /**
148 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
149 * vdev
150 * @vdev: vdev handle
151 *
152 * Return: Handle to struct qdf_mac_addr
153 */
154 struct qdf_mac_addr *
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800155 (*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530156
157 /**
158 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
159 * @vdev: vdev handle
160 *
161 * Return: Handle to pdev
162 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800163 struct cdp_pdev *(*txrx_get_pdev_from_vdev)
164 (struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530165
166 /**
167 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
168 * @vdev: vdev handle
169 *
170 * Return: Handle to control pdev
171 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800172 struct cdp_cfg *
173 (*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530174
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800175 struct cdp_vdev *
176 (*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
177 uint8_t vdev_id);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530178
179 void (*txrx_soc_detach)(void *soc);
180
Karunakar Dasinenied1de122016-08-02 11:57:59 -0700181 int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
182 uint16_t tid, uint16_t batimeout, uint16_t buffersize,
183 uint16_t startseqnum);
184
185 void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
186 uint8_t *dialogtoken, uint16_t *statuscode,
187 uint16_t *buffersize, uint16_t *batimeout);
188
189 int (*delba_process)(void *peer_handle,
190 int tid, uint16_t reasoncode);
Ishank Jain1e7401c2017-02-17 15:38:39 +0530191
192 uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
193 uint16_t peer_id, uint8_t *mac_addr);
Ishank Jain949674c2017-02-27 17:09:29 +0530194
195 void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
196 uint8_t map_id);
197
Manikandan Mohane2fa8b72017-03-22 11:18:26 -0700198 void (*flush_cache_rx_queue)(void);
Ishank Jain949674c2017-02-27 17:09:29 +0530199 void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
200 uint8_t tos, uint8_t tid);
201
Ishank Jain6290a3c2017-03-21 10:49:39 +0530202 int (*txrx_stats)(struct cdp_vdev *vdev, enum cdp_stats stats);
Venkata Sharath Chandra Manchalaa405eb72017-03-06 14:35:00 -0800203
204 QDF_STATUS (*display_stats)(void *psoc, uint16_t value);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +0530205
206 void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);
207
208 int(*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
Venkateswara Swamy Bandarua95b3242017-05-19 20:20:30 +0530209 QDF_STATUS (*txrx_intr_attach)(void *soc);
210 void (*txrx_intr_detach)(void *soc);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530211};
212
213struct cdp_ctrl_ops {
214
215 int
216 (*txrx_mempools_attach)(void *ctrl_pdev);
217 int
218 (*txrx_set_filter_neighbour_peers)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800219 struct cdp_pdev *pdev,
Pratik Gandhi8b8334b2017-03-09 17:41:40 +0530220 uint32_t val);
221 int
222 (*txrx_update_filter_neighbour_peers)(
223 struct cdp_pdev *pdev,
224 uint32_t cmd, uint8_t *macaddr);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530225 /**
226 * @brief set the safemode of the device
227 * @details
228 * This flag is used to bypass the encrypt and decrypt processes when
229 * send and receive packets. It works like open AUTH mode, HW will
230 * ctreate all packets as non-encrypt frames because no key installed.
231 * For rx fragmented frames,it bypasses all the rx defragmentaion.
232 *
233 * @param vdev - the data virtual device object
234 * @param val - the safemode state
235 * @return - void
236 */
237
238 void
239 (*txrx_set_safemode)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800240 struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530241 u_int32_t val);
242 /**
243 * @brief configure the drop unencrypted frame flag
244 * @details
245 * Rx related. When set this flag, all the unencrypted frames
246 * received over a secure connection will be discarded
247 *
248 * @param vdev - the data virtual device object
249 * @param val - flag
250 * @return - void
251 */
252 void
253 (*txrx_set_drop_unenc)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800254 struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530255 u_int32_t val);
256
257
258 /**
259 * @brief set the Tx encapsulation type of the VDEV
260 * @details
261 * This will be used to populate the HTT desc packet type field
262 * during Tx
263 * @param vdev - the data virtual device object
264 * @param val - the Tx encap type
265 * @return - void
266 */
267 void
268 (*txrx_set_tx_encap_type)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800269 struct cdp_vdev *vdev,
Nandha Kishore Easwaran870abda2016-11-16 17:37:19 +0530270 enum htt_cmn_pkt_type val);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530271 /**
272 * @brief set the Rx decapsulation type of the VDEV
273 * @details
274 * This will be used to configure into firmware and hardware
275 * which format to decap all Rx packets into, for all peers under
276 * the VDEV.
277 * @param vdev - the data virtual device object
278 * @param val - the Rx decap mode
279 * @return - void
280 */
281 void
282 (*txrx_set_vdev_rx_decap_type)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800283 struct cdp_vdev *vdev,
Nandha Kishore Easwaran870abda2016-11-16 17:37:19 +0530284 enum htt_cmn_pkt_type val);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530285
286 /**
287 * @brief get the Rx decapsulation type of the VDEV
288 *
289 * @param vdev - the data virtual device object
290 * @return - the Rx decap type
291 */
Nandha Kishore Easwaranfb0a7e52017-02-03 21:18:49 +0530292 enum htt_cmn_pkt_type
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800293 (*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530294
295 /* Is this similar to ol_txrx_peer_state_update() in MCL */
296 /**
297 * @brief Update the authorize peer object at association time
298 * @details
299 * For the host-based implementation of rate-control, it
300 * updates the peer/node-related parameters within rate-control
301 * context of the peer at association.
302 *
303 * @param peer - pointer to the node's object
304 * @authorize - either to authorize or unauthorize peer
305 *
306 * @return none
307 */
308 void
309 (*txrx_peer_authorize)(void *peer,
310 u_int32_t authorize);
311
312 bool
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800313 (*txrx_set_inact_params)(struct cdp_pdev *pdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530314 u_int16_t inact_check_interval,
315 u_int16_t inact_normal,
316 u_int16_t inact_overload);
317 bool
318 (*txrx_start_inact_timer)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800319 struct cdp_pdev *pdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530320 bool enable);
321
322
323 /**
324 * @brief Set the overload status of the radio
325 * @details
326 * Set the overload status of the radio, updating the inactivity
327 * threshold and inactivity count for each node.
328 *
329 * @param pdev - the data physical device object
330 * @param overload - whether the radio is overloaded or not
331 */
332 void (*txrx_set_overload)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800333 struct cdp_pdev *pdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530334 bool overload);
335 /**
336 * @brief Check the inactivity status of the peer/node
337 *
338 * @param peer - pointer to the node's object
339 * @return true if the node is inactive; otherwise return false
340 */
341 bool
342 (*txrx_peer_is_inact)(void *peer);
343
344 /**
345 * @brief Mark inactivity status of the peer/node
346 * @details
347 * If it becomes active, reset inactivity count to reload value;
348 * if the inactivity status changed, notify umac band steering.
349 *
350 * @param peer - pointer to the node's object
351 * @param inactive - whether the node is inactive or not
352 */
353 void (*txrx_mark_peer_inact)(
354 void *peer,
355 bool inactive);
356
357
358 /* Should be ol_txrx_ctrl_api.h */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800359 void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530360
Venkateswara Swamy Bandaruec4f8e62017-03-07 11:04:28 +0530361 /**
362 * @brief setting mesh rx filter
363 * @details
364 * based on the bits enabled in the filter packets has to be dropped.
365 *
366 * @param vdev - the data virtual device object
367 * @param val - value to set
368 */
369 void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);
370
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800371 void (*tx_flush_buffers)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530372
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800373 int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530374
Ishank Jain9f174c62017-03-30 18:37:42 +0530375 void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
376 enum cdp_vdev_param_type param, uint32_t val);
377
378 void (*txrx_peer_set_nawds)(void *peer, uint8_t value);
Tallapragada Kalyanfd1edcc2017-03-07 19:34:29 +0530379 /**
380 * @brief Set the reo dest ring num of the radio
381 * @details
382 * Set the reo destination ring no on which we will receive
383 * pkts for this radio.
384 *
385 * @param pdev - the data physical device object
386 * @param reo_dest_ring_num - value ranges between 1 - 4
387 */
388 void (*txrx_set_pdev_reo_dest)(
389 struct cdp_pdev *pdev,
390 enum cdp_host_reo_dest_ring reo_dest_ring_num);
391
392 /**
393 * @brief Get the reo dest ring num of the radio
394 * @details
395 * Get the reo destination ring no on which we will receive
396 * pkts for this radio.
397 *
398 * @param pdev - the data physical device object
399 * @return the reo destination ring number
400 */
401 enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
402 struct cdp_pdev *pdev);
Nandha Kishore Easwaran26689942017-04-17 16:52:46 +0530403
404 int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
405 uint32_t event);
406
407 int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
408 uint32_t event);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530409};
410
411struct cdp_me_ops {
412
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530413 u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800414 (struct cdp_pdev *pdev, u_int16_t buf_count);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530415
416 u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800417 struct cdp_pdev *pdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530418 u_int16_t buf_count);
419
420 u_int16_t
421 (*tx_get_mcast_buf_allocated_marked)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800422 (struct cdp_pdev *pdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530423 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800424 (*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530425
426 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800427 (*tx_me_free_descriptor)(struct cdp_pdev *pdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530428
429 uint16_t
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800430 (*tx_me_convert_ucast)(struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530431 qdf_nbuf_t wbuf, u_int8_t newmac[][6],
432 uint8_t newmaccnt);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530433 /* Should be a function pointer in ol_txrx_osif_ops{} */
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530434 /**
435 * @brief notify mcast frame indication from FW.
436 * @details
437 * This notification will be used to convert
438 * multicast frame to unicast.
439 *
440 * @param pdev - handle to the ctrl SW's physical device object
441 * @param vdev_id - ID of the virtual device received the special data
442 * @param msdu - the multicast msdu returned by FW for host inspect
443 */
444
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800445 int (*mcast_notify)(struct cdp_pdev *pdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530446 u_int8_t vdev_id, qdf_nbuf_t msdu);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530447};
448
449struct cdp_mon_ops {
450
451 void (*txrx_monitor_set_filter_ucast_data)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800452 (struct cdp_pdev *, u_int8_t val);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530453 void (*txrx_monitor_set_filter_mcast_data)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800454 (struct cdp_pdev *, u_int8_t val);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530455 void (*txrx_monitor_set_filter_non_data)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800456 (struct cdp_pdev *, u_int8_t val);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530457
458 u_int8_t (*txrx_monitor_get_filter_ucast_data)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800459 (struct cdp_vdev *vdev_txrx_handle);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530460 u_int8_t (*txrx_monitor_get_filter_mcast_data)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800461 (struct cdp_vdev *vdev_txrx_handle);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530462 u_int8_t (*txrx_monitor_get_filter_non_data)
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800463 (struct cdp_vdev *vdev_txrx_handle);
464 int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530465
466};
467
468struct cdp_host_stats_ops {
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800469 int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
Ishank Jain6290a3c2017-03-21 10:49:39 +0530470 struct ol_txrx_stats_req *req);
471
472 void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530473
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800474 void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530475
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800476 int (*txrx_stats_publish)(struct cdp_pdev *pdev,
Leo Changdb6358c2016-09-27 17:00:52 -0700477 void *buf);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530478 /**
479 * @brief Enable enhanced stats functionality.
480 *
481 * @param pdev - the physical device object
482 * @return - void
483 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800484 void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530485
486 /**
487 * @brief Disable enhanced stats functionality.
488 *
489 * @param pdev - the physical device object
490 * @return - void
491 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800492 void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530493
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530494 /**
495 * @brief Get the desired stats from the message.
496 *
497 * @param pdev - the physical device object
498 * @param stats_base - stats buffer recieved from FW
499 * @param type - stats type.
500 * @return - pointer to requested stat identified by type
501 */
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800502 uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530503 uint32_t *stats_base, uint32_t msg_len, uint8_t type);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530504 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800505 (*tx_print_tso_stats)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530506
507 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800508 (*tx_rst_tso_stats)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530509
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530510 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800511 (*tx_print_sg_stats)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530512
513 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800514 (*tx_rst_sg_stats)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530515
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530516 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800517 (*print_rx_cksum_stats)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530518
519 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800520 (*rst_rx_cksum_stats)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530521
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530522 A_STATUS
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800523 (*txrx_host_me_stats)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530524 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800525 (*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);
526 int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530527 struct ol_txrx_stats_req *req);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530528
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530529 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800530 (*print_lro_stats)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530531
532 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800533 (*reset_lro_stats)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530534
Ishank Jain6290a3c2017-03-21 10:49:39 +0530535 void
536 (*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
537 uint32_t cap);
538
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530539};
540
541struct cdp_wds_ops {
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530542 void
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800543 (*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530544 u_int32_t val);
Karunakar Dasinenica792542017-01-16 10:08:58 -0800545 int (*vdev_set_wds)(void *vdev, uint32_t val);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530546};
547
548struct cdp_raw_ops {
Venkata Sharath Chandra Manchalaf2a125a2016-11-28 18:10:11 -0800549 int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530550
Venkateswara Swamy Bandaru45f85562017-02-20 18:28:40 +0530551 void (*rsim_get_astentry)(struct cdp_vdev *vdev,
552 qdf_nbuf_t *pnbuf,
553 struct cdp_raw_ast *raw_ast);
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530554};
555
#ifdef CONFIG_WIN
/**
 * struct cdp_pflow_ops - per-pdev flow/parameter update ops (WIN only).
 */
struct cdp_pflow_ops {
	/* Update a pdev parameter (ol_ath_param_t key) with a value and
	 * optional context buffer; returns a status code. */
	uint32_t (*pflow_update_pdev_params)(void *,
			ol_ath_param_t, uint32_t, void *);
};
#endif /* CONFIG_WIN */

/* Toeplitz seed word counts: 40-bit key for IPv4 5-tuples, 88-bit key
 * for IPv6 5-tuples, stored as 32-bit words. */
#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set LRO init parameters
 * @lro_enable: indicates whether lro is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not LRO eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	/* 9-bit fields: TCP flags occupy 9 bits in the TCP header. */
	uint32_t tcp_flag:9,
		tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};

Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530587struct ol_if_ops {
588 void (*peer_set_default_routing)(void *scn_handle,
589 uint8_t *peer_macaddr, uint8_t vdev_id,
590 bool hash_based, uint8_t ring_num);
Kiran Venkatappa9edb9612017-03-16 11:37:35 +0530591 int (*peer_rx_reorder_queue_setup)(void *scn_handle,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530592 uint8_t vdev_id, uint8_t *peer_mac,
593 qdf_dma_addr_t hw_qdesc, int tid, uint16_t queue_num);
Kiran Venkatappa9edb9612017-03-16 11:37:35 +0530594 int (*peer_rx_reorder_queue_remove)(void *scn_handle,
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530595 uint8_t vdev_id, uint8_t *peer_macaddr,
596 uint32_t tid_mask);
Kiran Venkatappa9edb9612017-03-16 11:37:35 +0530597 int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id,
Vijay Pamidipatida061162017-01-17 12:53:05 +0530598 uint8_t *peer_macaddr);
Pramod Simha7f7b4aa2017-03-27 14:48:09 -0700599 bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
Ishank Jain2bf04b42017-02-23 22:38:42 +0530600 int (*peer_add_wds_entry)(void *ol_soc_handle,
601 const uint8_t *dest_macaddr, uint8_t *peer_macaddr,
602 uint32_t flags);
603 int (*peer_update_wds_entry)(void *ol_soc_handle,
604 uint8_t *dest_macaddr, uint8_t *peer_macaddr,
605 uint32_t flags);
606 void (*peer_del_wds_entry)(void *ol_soc_handle,
607 uint8_t *wds_macaddr);
Dhanashri Atre14049172016-11-11 18:32:36 -0800608 QDF_STATUS (*lro_hash_config)(void *scn_handle,
609 struct cdp_lro_hash_config *lro_hash);
Ishank Jain1e7401c2017-02-17 15:38:39 +0530610 void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
611 uint8_t type);
Ishank Jain9f174c62017-03-30 18:37:42 +0530612 uint8_t (*rx_invalid_peer)(void *osif_pdev, void *msg);
Ishank Jain1e7401c2017-02-17 15:38:39 +0530613
Bharat Kumar M9a5d5372017-05-08 17:41:42 +0530614 int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
615 uint8_t vdev_id, uint8_t *peer_mac_addr);
616 int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id);
617
Pamidipati, Vijay6b0d2a82017-06-09 04:46:32 +0530618 int (*get_dp_cfg_param)(void *ol_soc_handle, enum cdp_cfg_param_type param_num);
Bharat Kumar M9a5d5372017-05-08 17:41:42 +0530619
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530620 /* TODO: Add any other control path calls required to OL_IF/WMA layer */
621};
622
Leo Changdb6358c2016-09-27 17:00:52 -0700623#ifndef CONFIG_WIN
624/* From here MCL specific OPs */
/**
 * struct cdp_misc_ops - miscellaneous MCL data path ops not classified
 * elsewhere.  Function pointers are filled in by the underlying data path
 * and invoked through the converged CDP layer; handles are opaque.
 * @set_ibss_vdev_heart_beat_timer: set IBSS vdev heart beat timer (seconds);
 *	returns a uint16_t (presumably the previous value — verify in impl)
 * @set_wmm_param: apply WMM parameters to the given pdev config
 * @bad_peer_txctl_set_setting: configure bad-peer TX flow control
 *	(enable flag, period, tx queue limit)
 * @bad_peer_txctl_update_threshold: update bad-peer TX control thresholds
 * @hl_tdls_flag_reset: set/clear a TDLS flag on the vdev (high-latency path)
 * @tx_non_std: send a non-standard TX msdu list; returns any nbufs
 *	not consumed by the data path
 * @get_vdev_id: return the numeric id for an opaque vdev handle
 * @set_wisa_mode: enable/disable WISA mode on the vdev
 * @runtime_suspend: runtime-PM suspend hook for the pdev
 * @runtime_resume: runtime-PM resume hook for the pdev
 * @get_opmode: return the vdev operating mode (as int)
 * @mark_first_wakeup_packet: mark the first packet after wakeup
 * @update_mac_id: bind a mac_id to the given vdev_id
 * @flush_rx_frames: flush pending RX frames cached for a peer;
 *	drop instead of delivering when @drop is true
 * @get_intra_bss_fwd_pkts_count: fetch intra-BSS forwarded TX/RX
 *	packet counters for a vdev
 * @pkt_log_init: initialize packet log for the pdev
 * @pkt_log_con_service: connect packet log to its service
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
		uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
		struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
		int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
		int level, int tput_thresh, int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
	int (*get_opmode)(struct cdp_vdev *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
		uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
};
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530662
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay: fetch queue delay and tx delay (microseconds) for a
 *	traffic category via the two out-pointers
 * @tx_delay_hist: fill @bin_values with the tx delay histogram for
 *	a traffic category
 * @tx_packet_count: fetch tx packet and packet-loss counts for a
 *	traffic category via the out-pointers
 * @tx_set_compute_interval: set the delay computation interval
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
		uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
		uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
		uint16_t *out_packet_count,
		uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
		uint32_t interval);
};
681
/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info: return, via the three out-pointers, the peer's last
 *	packet-number validity flags, last PN values and robust-mgmt-frame
 *	PN replay counters (pointers into peer state; caller must not free)
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
		uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
690
/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled: enable/disable rx forwarding in the config
 * @set_cfg_packet_log_enabled: enable/disable packet log in the config
 * @cfg_attach: allocate and attach a config context; returns the new
 *	opaque cdp_cfg handle
 * @vdev_rx_set_intrabss_fwd: enable/disable intra-BSS forwarding on a vdev
 * @is_rx_fwd_disabled: query whether rx forwarding is disabled for a vdev
 * @tx_set_is_mgmt_over_wmi_enabled: set whether mgmt frames go over WMI
 * @is_high_latency: non-zero if the config is for a high-latency target
 * @set_flow_control_parameters: apply flow control parameters from @param
 * @set_flow_steering: enable/disable flow steering in the config
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
		uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
		uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
		void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
};
717
718/**
719 * struct cdp_flowctl_ops - mcl flow control
720 * @register_pause_cb:
721 * @set_desc_global_pool_size:
722 * @dump_flow_pool_info:
723 */
724struct cdp_flowctl_ops {
725 QDF_STATUS (*register_pause_cb)(ol_tx_pause_callback_fp);
726
727 void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
728 void (*dump_flow_pool_info)(void);
729};
730
/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control: register a per-vdev flow control callback
 *	with its osif context
 * @deregister_tx_flow_control_cb: remove the per-vdev flow control callback
 * @flow_control_cb: notify the vdev of a TX resume/pause transition
 * @get_tx_resource: true if TX resources for @sta_id are above the
 *	given watermarks
 * @ll_set_tx_pause_q_depth: set the TX pause queue depth for a vdev
 * @vdev_flush: flush the vdev's TX queues
 * @vdev_pause: pause vdev TX for the given reason
 * @vdev_unpause: unpause vdev TX for the given reason
 */
struct cdp_lflowctl_ops {
	int (*register_tx_flow_control)(uint8_t vdev_id,
		ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx);
	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
	bool (*get_tx_resource)(uint8_t sta_id,
		unsigned int low_watermark,
		unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
	void (*vdev_flush)(struct cdp_vdev *vdev);
	void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
};
755
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource: fill @ipa_res with the pdev's IPA uC resources
 * @ipa_set_doorbell_paddr: program the IPA uC TX/RX doorbell
 *	physical addresses
 * @ipa_set_active: mark IPA uC active/inactive for the TX or RX path
 * @ipa_op_response: handle an IPA uC operation response message
 * @ipa_register_op_cb: register the osif callback invoked on IPA uC
 *	op messages, with its osif device context
 * @ipa_get_stat: trigger retrieval of IPA uC statistics
 * @ipa_tx_data_frame: send a data frame via the IPA path; returns any
 *	nbuf not consumed by the data path
 * @ipa_set_uc_tx_partition_base: set the IPA uC TX partition base in config
 * @ipa_uc_get_share_stats: fetch (and optionally reset) IPA uC
 *	shared statistics
 * @ipa_uc_set_quota: set the IPA uC byte quota
 */
struct cdp_ipa_ops {
	void (*ipa_get_resource)(struct cdp_pdev *pdev,
		struct ol_txrx_ipa_resources *ipa_res);
	void (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev,
		qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
		qdf_dma_addr_t ipa_rx_uc_doorbell_paddr);
	void (*ipa_set_active)(struct cdp_pdev *pdev,
		bool uc_active, bool is_tx);
	void (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
	void (*ipa_register_op_cb)(struct cdp_pdev *pdev,
		void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
		void *osif_dev);
	void (*ipa_get_stat)(struct cdp_pdev *pdev);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *cfg_pdev,
		uint32_t value);
	void (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
		uint8_t reset_stats);
	void (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
		uint64_t quota_bytes);
};
787
/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend: suspend the data path ahead of a bus suspend;
 *	returns QDF_STATUS indicating whether suspend may proceed
 * @bus_resume: resume the data path after a bus resume
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
	QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
};
797
/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info: set the OCB channel configuration on the vdev
 * @get_ocb_chan_info: return the vdev's OCB channel info (pointer into
 *	data path state; caller must not free)
 */
struct cdp_ocb_ops {
	void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
		struct ol_txrx_ocb_set_chan ocb_set_chan);
	struct ol_txrx_ocb_chan_info *
	(*get_ocb_chan_info)(struct cdp_vdev *vdev);
};
809
/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer: register a STA descriptor with the data path
 * @clear_peer: clear data-path state for the given sta_id
 * @change_peer_state: move a peer to @sta_state (roam-synch aware)
 * @find_peer_by_addr: look up a peer by MAC address; fills @peer_id
 * @find_peer_by_addr_and_vdev: look up a peer by MAC address restricted
 *	to one vdev; fills @peer_id
 * @local_peer_id: return the local peer id for an opaque peer handle
 * @peer_find_by_local_id: look up a peer by its local peer id
 * @peer_state_update: update the state of the peer with @peer_addr
 * @get_vdevid: fill @vdev_id with the id of the peer's vdev
 * @get_vdev_by_sta_id: return the vdev handle owning @sta_id
 * @register_ocb_peer: register an OCB peer by MAC address; fills @peer_id
 * @peer_get_peer_mac_addr: return a pointer to the peer's MAC address
 * @peer_get_peer_mac_addr: (pointer into peer state; caller must not free)
 * @get_peer_state: return the peer's current state (as int)
 * @get_vdev_for_peer: return the vdev handle the peer belongs to
 * @update_ibss_add_peer_num_of_vdev: adjust the vdev's IBSS peer count
 *	by @peer_num_delta; returns the updated count
 * @remove_peers_for_vdev: remove the vdev's peers, invoking @callback
 *	per peer; @remove_last_peer controls the self/BSS peer
 * @remove_peers_for_vdev_no_lock: as above but without taking the
 *	peer lock (caller must ensure safety)
 * @copy_mac_addr_raw: copy @bss_addr into the vdev's raw MAC storage
 * @add_last_real_peer: re-add the last real peer of the vdev
 * @last_assoc_received: pointer to the peer's last-assoc timestamp
 * @last_disassoc_received: pointer to the peer's last-disassoc timestamp
 * @last_deauth_received: pointer to the peer's last-deauth timestamp
 * @is_vdev_restore_last_peer: whether the vdev should restore its
 *	last peer
 * @update_last_real_peer: refresh the pdev's last-real-peer record;
 *	fills @peer_id
 * @peer_detach_force_delete: forcibly detach and delete the peer
 */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
		struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
	QDF_STATUS (*change_peer_state)(uint8_t sta_id,
		enum ol_txrx_peer_state sta_state,
		bool roam_synch_in_progress);
	void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
		uint8_t *peer_addr, uint8_t *peer_id);
	void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
		struct cdp_vdev *vdev,
		uint8_t *peer_addr, uint8_t *peer_id);
	uint16_t (*local_peer_id)(void *peer);
	void * (*peer_find_by_local_id)(struct cdp_pdev *pdev,
		uint8_t local_peer_id);
	QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
		uint8_t *peer_addr,
		enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
	struct cdp_vdev * (*get_vdev_by_sta_id)(uint8_t sta_id);
	QDF_STATUS (*register_ocb_peer)(void *cds_ctx, uint8_t *mac_addr,
		uint8_t *peer_id);
	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(void *peer);
	struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
		int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
		ol_txrx_vdev_peer_remove_cb callback,
		void *callback_context, bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
		ol_txrx_vdev_peer_remove_cb callback,
		void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_pdev *pdev,
		struct cdp_vdev *vdev, uint8_t *peer_id);
	qdf_time_t * (*last_assoc_received)(void *peer);
	qdf_time_t * (*last_disassoc_received)(void *peer);
	qdf_time_t * (*last_deauth_received)(void *peer);
	bool (*is_vdev_restore_last_peer)(void *peer);
	void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer,
		uint8_t *peer_id, bool restore_last_peer);
	void (*peer_detach_force_delete)(void *peer);
};
881
/**
 * struct cdp_throttle_ops - mcl thermal throttle ops
 * (doc header previously misnamed this "struct cdp_ocb_ops")
 * @throttle_init_period: initialize the throttle period and per-level
 *	duty cycle table
 * @throttle_set_level: set the current throttle level
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
		uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
};
892
/**
 * struct cdp_mob_stats_ops - mcl mobile statistics ops
 * (doc header previously misnamed this "struct cdp_ocb_ops")
 * @clear_stats: clear the statistics selected by @bitmap
 * @stats: format vdev statistics into @buffer (at most @buf_len bytes);
 *	returns an int status/length — verify exact meaning in impl
 */
struct cdp_mob_stats_ops {
	void (*clear_stats)(uint16_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};
902#endif /* CONFIG_WIN */
Nandha Kishore Easwarane5444bc2016-10-20 13:23:23 +0530903
/**
 * struct cdp_ops - top-level converged data path ops table.
 * Aggregates the per-category ops tables that the underlying data path
 * implementation registers with the converged CDP layer.  The MCL-only
 * tables are compiled out under CONFIG_WIN.
 * @cmn_drv_ops: common ops
 * @ctrl_ops: control-path ops
 * @me_ops: mcast enhancement ops
 * @mon_ops: monitor-mode ops
 * @host_stats_ops: host statistics ops
 * @wds_ops: WDS ops
 * @raw_ops: raw-mode ops
 * @pflow_ops: peer-flow ops
 * @misc_ops: (MCL) miscellaneous ops
 * @cfg_ops: (MCL) configuration ops
 * @flowctl_ops: (MCL) flow control ops
 * @l_flowctl_ops: (MCL) legacy flow control ops
 * @ipa_ops: (MCL) IPA ops
 * @bus_ops: (MCL) bus suspend/resume ops
 * @ocb_ops: (MCL) OCB ops
 * @peer_ops: (MCL) peer ops
 * @throttle_ops: (MCL) thermal throttle ops
 * @mob_stats_ops: (MCL) mobile statistics ops
 * @delay_ops: (MCL) tx delay ops
 * @pmf_ops: (MCL) protected management frame ops
 */
struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifndef CONFIG_WIN
	struct cdp_misc_ops *misc_ops;
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
	struct cdp_ipa_ops *ipa_ops;
	struct cdp_bus_ops *bus_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_throttle_ops *throttle_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_tx_delay_ops *delay_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif /* CONFIG_WIN */
};
928
929#endif