blob: 79b830e3a6c1cee30182e056a9f3aaa3a5aa8bf3 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * @file ol_txrx_ctrl_api.h
30 * @brief Define the host data API functions called by the host control SW.
31 */
32#ifndef _OL_TXRX_CTRL_API__H_
33#define _OL_TXRX_CTRL_API__H_
34
35#include <athdefs.h> /* A_STATUS */
Nirav Shahcbc6d722016-03-01 16:24:53 +053036#include <qdf_nbuf.h> /* qdf_nbuf_t */
Anurag Chouhan6d760662016-02-20 16:05:43 +053037#include <qdf_types.h> /* qdf_device_t */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080038#include <htc_api.h> /* HTC_HANDLE */
39
40#include <ol_osif_api.h> /* ol_osif_vdev_handle */
41#include <ol_txrx_api.h> /* ol_txrx_pdev_handle, etc. */
42#include <ol_ctrl_api.h> /* ol_pdev_handle, ol_vdev_handle */
43
44#include <wlan_defs.h> /* MAX_SPATIAL_STREAM */
45
46#define OL_ATH_TX_DRAIN_WAIT_DELAY 50
47
48/* Maximum number of station supported by data path, including BC. */
49#define WLAN_MAX_STA_COUNT (HAL_NUM_STA)
50
/* The symbolic station ID returned to HDD to specify that the packet is bc/mc */
52#define WLAN_RX_BCMC_STA_ID (WLAN_MAX_STA_COUNT + 1)
53
/* The symbolic station ID returned to HDD to specify that the packet is
   destined to the soft-AP itself */
56#define WLAN_RX_SAP_SELF_STA_ID (WLAN_MAX_STA_COUNT + 2)
57
/**
 * enum wlan_op_mode - Virtual device operation mode
 *
 * Identifies the role of a virtual device; passed as the op_mode
 * argument to ol_txrx_vdev_attach().
 *
 * @wlan_op_mode_unknown: Unknown mode
 * @wlan_op_mode_ap: AP mode
 * @wlan_op_mode_ibss: IBSS mode
 * @wlan_op_mode_sta: STA (client) mode
 * @wlan_op_mode_monitor: Monitor mode
 * @wlan_op_mode_ocb: OCB mode
 */
enum wlan_op_mode {
	wlan_op_mode_unknown,
	wlan_op_mode_ap,
	wlan_op_mode_ibss,
	wlan_op_mode_sta,
	wlan_op_mode_monitor,
	wlan_op_mode_ocb,
};
76
77#define OL_TXQ_PAUSE_REASON_FW (1 << 0)
78#define OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED (1 << 1)
79#define OL_TXQ_PAUSE_REASON_TX_ABORT (1 << 2)
80#define OL_TXQ_PAUSE_REASON_VDEV_STOP (1 << 3)
81#define OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION (1 << 4)
82
83
/**
 * enum netif_action_type - Type of actions on netif queues
 * @WLAN_STOP_ALL_NETIF_QUEUE: stop all netif queues
 * @WLAN_START_ALL_NETIF_QUEUE: start all netif queues
 * @WLAN_WAKE_ALL_NETIF_QUEUE: wake all netif queues
 * @WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER: stop all queues and off carrier
 * @WLAN_START_ALL_NETIF_QUEUE_N_CARRIER: start all queues and on carrier
 * @WLAN_NETIF_TX_DISABLE: disable tx
 * @WLAN_NETIF_TX_DISABLE_N_CARRIER: disable tx and off carrier
 * @WLAN_NETIF_CARRIER_ON: on carrier
 * @WLAN_NETIF_CARRIER_OFF: off carrier
 * @WLAN_NETIF_ACTION_TYPE_MAX: number of action types; must be last
 */
enum netif_action_type {
	WLAN_STOP_ALL_NETIF_QUEUE,
	WLAN_START_ALL_NETIF_QUEUE,
	WLAN_WAKE_ALL_NETIF_QUEUE,
	WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER,
	WLAN_START_ALL_NETIF_QUEUE_N_CARRIER,
	WLAN_NETIF_TX_DISABLE,
	WLAN_NETIF_TX_DISABLE_N_CARRIER,
	WLAN_NETIF_CARRIER_ON,
	WLAN_NETIF_CARRIER_OFF,
	WLAN_NETIF_ACTION_TYPE_MAX,
};
108
/**
 * enum netif_reason_type - reason for netif queue action
 * @WLAN_CONTROL_PATH: action from control path
 * @WLAN_DATA_FLOW_CONTROL: because of flow control
 * @WLAN_FW_PAUSE: because of firmware pause
 * @WLAN_TX_ABORT: because of tx abort
 * @WLAN_VDEV_STOP: because of vdev stop
 * @WLAN_PEER_UNAUTHORISED: because the peer is unauthorised
 * @WLAN_THERMAL_MITIGATION: because of thermal mitigation
 * @WLAN_REASON_TYPE_MAX: number of reason types; must be last
 */
enum netif_reason_type {
	WLAN_CONTROL_PATH,
	WLAN_DATA_FLOW_CONTROL,
	WLAN_FW_PAUSE,
	WLAN_TX_ABORT,
	WLAN_VDEV_STOP,
	WLAN_PEER_UNAUTHORISED,
	WLAN_THERMAL_MITIGATION,
	WLAN_REASON_TYPE_MAX,
};
129
130
131/* command options for dumpStats*/
132#define WLAN_HDD_STATS 0
133#define WLAN_TXRX_STATS 1
134#define WLAN_TXRX_HIST_STATS 2
135#define WLAN_TXRX_TSO_STATS 3
136#define WLAN_HDD_NETIF_OPER_HISTORY 4
137#define WLAN_DUMP_TX_FLOW_POOL_INFO 5
138#define WLAN_TXRX_DESC_STATS 6
139
140ol_txrx_pdev_handle
141ol_txrx_pdev_alloc(ol_pdev_handle ctrl_pdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +0530142 HTC_HANDLE htc_pdev, qdf_device_t osdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800143
144/**
145 * @brief Set up the data SW subsystem.
146 * @details
147 * As part of the WLAN device attach, the data SW subsystem has
148 * to be attached as a component within the WLAN device.
149 * This attach allocates and initializes the physical device object
150 * used by the data SW.
 * The data SW subsystem attach needs to happen after the target has
 * been started, and host / target parameter negotiation has completed,
153 * since the host data SW uses some of these host/target negotiated
154 * parameters (e.g. peer ID range) during the initializations within
155 * its attach function.
156 * However, the host data SW is not allowed to send HTC messages to the
157 * target within this pdev_attach function call, since the HTC setup
 * has not completed at this stage of initialization.  Any messaging
159 * to the target has to be done in the separate pdev_attach_target call
160 * that is invoked after HTC setup is complete.
161 *
162 * @param pdev - txrx_pdev handle
163 * @return 0 for success or error code
164 */
165int
166ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev);
167
168/**
169 * @brief Do final steps of data SW setup that send messages to the target.
170 * @details
171 * The majority of the data SW setup are done by the pdev_attach function,
172 * but this function completes the data SW setup by sending datapath
173 * configuration messages to the target.
174 *
175 * @param data_pdev - the physical device being initialized
176 */
177A_STATUS ol_txrx_pdev_attach_target(ol_txrx_pdev_handle data_pdev);
178
179/**
180 * @brief Allocate and initialize the data object for a new virtual device.
181 * @param data_pdev - the physical device the virtual device belongs to
182 * @param vdev_mac_addr - the MAC address of the virtual device
183 * @param vdev_id - the ID used to identify the virtual device to the target
184 * @param op_mode - whether this virtual device is operating as an AP,
185 * an IBSS, or a STA
186 * @return
187 * success: handle to new data vdev object, -OR-
188 * failure: NULL
189 */
190ol_txrx_vdev_handle
191ol_txrx_vdev_attach(ol_txrx_pdev_handle data_pdev,
192 uint8_t *vdev_mac_addr,
193 uint8_t vdev_id, enum wlan_op_mode op_mode);
194
195/**
196 * @brief Allocate and set up references for a data peer object.
197 * @details
198 * When an association with a peer starts, the host's control SW
199 * uses this function to inform the host data SW.
200 * The host data SW allocates its own peer object, and stores a
201 * reference to the control peer object within the data peer object.
202 * The host data SW also stores a reference to the virtual device
203 * that the peer is associated with. This virtual device handle is
204 * used when the data SW delivers rx data frames to the OS shim layer.
205 * The host data SW returns a handle to the new peer data object,
206 * so a reference within the control peer object can be set to the
207 * data peer object.
208 *
209 * @param data_pdev - data physical device object that will indirectly
210 * own the data_peer object
211 * @param data_vdev - data virtual device object that will directly
212 * own the data_peer object
213 * @param peer_mac_addr - MAC address of the new peer
214 * @return handle to new data peer object, or NULL if the attach fails
215 */
216ol_txrx_peer_handle
217ol_txrx_peer_attach(ol_txrx_pdev_handle data_pdev,
218 ol_txrx_vdev_handle data_vdev, uint8_t *peer_mac_addr);
219
/**
 * union ol_txrx_peer_update_param_t - Parameter type input to
 *	ol_txrx_peer_update
 *
 * Carries exactly one of the values below; the select argument of
 * ol_txrx_peer_update (enum ol_txrx_peer_update_select_t) identifies
 * which member is valid for a given call.
 *
 * @qos_capable: whether the peer is QoS capable
 * @uapsd_mask: U-APSD mask for the peer
 * @sec_type: security type of the peer
 */
union ol_txrx_peer_update_param_t {
	uint8_t qos_capable;
	uint8_t uapsd_mask;
	enum ol_sec_type sec_type;
};
231
/**
 * enum ol_txrx_peer_update_select_t - Selector for ol_txrx_peer_update
 *
 * Specifies which member of union ol_txrx_peer_update_param_t holds the
 * information used to update the txrx peer object.
 * Note: values intentionally start at 1 so that 0 is never a valid selector.
 *
 * @ol_txrx_peer_update_qos_capable: param carries qos_capable
 * @ol_txrx_peer_update_uapsdMask: param carries uapsd_mask
 * @ol_txrx_peer_update_peer_security: param carries sec_type
 */
enum ol_txrx_peer_update_select_t {
	ol_txrx_peer_update_qos_capable = 1,
	ol_txrx_peer_update_uapsdMask,
	ol_txrx_peer_update_peer_security,
};
244
/**
 * @brief Update the data peer object as some information changed in node.
 * @details
 * Only a single parameter can be changed for each call to this func.
 *
 * @param peer - pointer to the node's object
 * @param param - new parameter to be updated in the peer object.
 * @param select - specifies which parameter needs to be updated
 */
254void
255ol_txrx_peer_update(ol_txrx_vdev_handle data_vdev, uint8_t *peer_mac,
256 union ol_txrx_peer_update_param_t *param,
257 enum ol_txrx_peer_update_select_t select);
258
/* WMM access categories, used to index per-AC tx configuration */
enum {
	OL_TX_WMM_AC_BE,	/* best effort */
	OL_TX_WMM_AC_BK,	/* background */
	OL_TX_WMM_AC_VI,	/* video */
	OL_TX_WMM_AC_VO,	/* voice */

	OL_TX_NUM_WMM_AC	/* number of access categories; must be last */
};
267
/**
 * struct ol_tx_ac_param_t - per-AC WMM setting passed to ol_txrx_set_wmm_param
 *
 * Used to update the TX WMM scheduler (HL systems only).
 *
 * @aifs: arbitration inter-frame space
 * @cwmin: minimum contention window
 * @cwmax: maximum contention window
 */
struct ol_tx_ac_param_t {
	uint32_t aifs;
	uint32_t cwmin;
	uint32_t cwmax;
};
278
/**
 * struct ol_tx_wmm_param_t - WMM settings for all access categories
 * @ac: per-AC parameters, indexed by the OL_TX_WMM_AC_* enumerators
 */
struct ol_tx_wmm_param_t {
	struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC];
};
282
283/**
 * @brief Set parameters of the WMM scheduler's per-AC settings.
285 * @details
286 * This function applies only to HL systems.
287 *
288 * @param data_pdev - the physical device being paused
289 * @param wmm_param - the wmm parameters
290 */
291#define ol_txrx_set_wmm_param(data_pdev, wmm_param) /* no-op */
292
293/**
294 * @brief notify tx data SW that a peer's transmissions are suspended.
295 * @details
296 * This function applies only to HL systems - in LL systems, tx flow control
297 * is handled entirely within the target FW.
298 * The HL host tx data SW is doing tx classification and tx download
299 * scheduling, and therefore also needs to actively participate in tx
300 * flow control. Specifically, the HL tx data SW needs to check whether a
301 * given peer is available to transmit to, or is paused.
302 * This function is used to tell the HL tx data SW when a peer is paused,
 * so the host tx data SW can hold the tx frames for that peer.
304 *
305 * @param data_peer - which peer is being paused
306 */
307#define ol_txrx_peer_pause(data_peer) /* no-op */
308
309/**
310 * @brief notify tx data SW that a peer-TID is ready to transmit to.
311 * @details
312 * This function applies only to HL systems - in LL systems, tx flow control
313 * is handled entirely within the target FW.
314 * If a peer-TID has tx paused, then the tx datapath will end up queuing
315 * any tx frames that arrive from the OS shim for that peer-TID.
316 * In a HL system, the host tx data SW itself will classify the tx frame,
317 * and determine that it needs to be queued rather than downloaded to the
318 * target for transmission.
319 * Once the peer-TID is ready to accept data, the host control SW will call
320 * this function to notify the host data SW that the queued frames can be
321 * enabled for transmission, or specifically to download the tx frames
322 * to the target to transmit.
323 * The TID parameter is an extended version of the QoS TID. Values 0-15
324 * indicate a regular QoS TID, and the value 16 indicates either non-QoS
325 * data, multicast data, or broadcast data.
326 *
327 * @param data_peer - which peer is being unpaused
328 * @param tid - which TID within the peer is being unpaused, or -1 as a
329 * wildcard to unpause all TIDs within the peer
330 */
331#define ol_txrx_peer_tid_unpause(data_peer, tid) /* no-op */
332
333/**
334 * @brief Tell a paused peer to release a specified number of tx frames.
335 * @details
336 * This function applies only to HL systems - in LL systems, tx flow control
337 * is handled entirely within the target FW.
338 * Download up to a specified maximum number of tx frames from the tx
339 * queues of the specified TIDs within the specified paused peer, usually
340 * in response to a U-APSD trigger from the peer.
341 * It is up to the host data SW to determine how to choose frames from the
342 * tx queues of the specified TIDs. However, the host data SW does need to
343 * provide long-term fairness across the U-APSD enabled TIDs.
344 * The host data SW will notify the target data FW when it is done downloading
345 * the batch of U-APSD triggered tx frames, so the target data FW can
346 * differentiate between an in-progress download versus a case when there are
347 * fewer tx frames available than the specified limit.
348 * This function is relevant primarily to HL U-APSD, where the frames are
349 * held in the host.
350 *
351 * @param peer - which peer sent the U-APSD trigger
352 * @param tid_mask - bitmask of U-APSD enabled TIDs from whose tx queues
353 * tx frames can be released
354 * @param max_frms - limit on the number of tx frames to release from the
355 * specified TID's queues within the specified peer
356 */
357#define ol_txrx_tx_release(peer, tid_mask, max_frms) /* no-op */
358
359/**
360 * @brief Suspend all tx data for the specified virtual device.
361 * @details
362 * This function applies primarily to HL systems, but also applies to
363 * LL systems that use per-vdev tx queues for MCC or thermal throttling.
364 * As an example, this function could be used when a single-channel physical
365 * device supports multiple channels by jumping back and forth between the
366 * channels in a time-shared manner. As the device is switched from channel
367 * A to channel B, the virtual devices that operate on channel A will be
368 * paused.
369 *
370 * @param data_vdev - the virtual device being paused
371 * @param reason - the reason for which vdev queue is getting paused
372 */
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason);
#else
/* no-op stub: with neither tx flow control scheme compiled in,
 * there are no host-side vdev tx queues to pause */
static inline
void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
	return;
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL || QCA_LL_TX_FLOW_CONTROL_V2 */
382
383/**
384 * @brief Drop all tx data for the specified virtual device.
385 * @details
386 * This function applies primarily to HL systems, but also applies to
387 * LL systems that use per-vdev tx queues for MCC or thermal throttling.
388 * This function would typically be used by the ctrl SW after it parks
389 * a STA vdev and then resumes it, but to a new AP. In this case, though
390 * the same vdev can be used, any old tx frames queued inside it would be
391 * stale, and would need to be discarded.
392 *
393 * @param data_vdev - the virtual device being flushed
394 */
395#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
396void ol_txrx_vdev_flush(ol_txrx_vdev_handle data_vdev);
397#else
398#define ol_txrx_vdev_flush(data_vdev) /* no-op */
399#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
400
401/**
402 * @brief Resume tx for the specified virtual device.
403 * @details
404 * This function applies primarily to HL systems, but also applies to
405 * LL systems that use per-vdev tx queues for MCC or thermal throttling.
406 *
407 * @param data_vdev - the virtual device being unpaused
408 * @param reason - the reason for which vdev queue is getting unpaused
409 */
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
void ol_txrx_vdev_unpause(ol_txrx_vdev_handle data_vdev, uint32_t reason);
#else
/* no-op stub: with neither tx flow control scheme compiled in,
 * there are no host-side vdev tx queues to unpause */
static inline
void ol_txrx_vdev_unpause(ol_txrx_vdev_handle data_vdev, uint32_t reason)
{
	return;
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL || QCA_LL_TX_FLOW_CONTROL_V2 */
419
420/**
421 * @brief Suspend all tx data per thermal event/timer for the
422 * specified physical device
423 * @details
 * This function applies only to HL systems, and it makes pause and
425 * unpause operations happen in pairs.
426 */
427#define ol_txrx_throttle_pause(data_pdev) /* no-op */
428
429/**
430 * @brief Resume all tx data per thermal event/timer for the
431 * specified physical device
432 * @details
 * This function applies only to HL systems, and it makes pause and
434 * unpause operations happen in pairs.
435 */
436#define ol_txrx_throttle_unpause(data_pdev) /* no-op */
437
438/**
439 * @brief Suspend all tx data for the specified physical device.
440 * @details
441 * This function applies only to HL systems - in LL systems, tx flow control
442 * is handled entirely within the target FW.
443 * In some systems it is necessary to be able to temporarily
444 * suspend all WLAN traffic, e.g. to allow another device such as bluetooth
445 * to temporarily have exclusive access to shared RF chain resources.
446 * This function suspends tx traffic within the specified physical device.
447 *
448 * @param data_pdev - the physical device being paused
449 */
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason);
#else
/* no-op stub: with neither tx flow control scheme compiled in,
 * there are no host-side pdev tx queues to pause */
static inline
void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason)
{
	return;
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL || QCA_LL_TX_FLOW_CONTROL_V2 */
459
460/**
461 * @brief Resume tx for the specified physical device.
462 * @details
463 * This function applies only to HL systems - in LL systems, tx flow control
464 * is handled entirely within the target FW.
465 *
466 * @param data_pdev - the physical device being unpaused
467 */
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason);
#else
/* no-op stub: with neither tx flow control scheme compiled in,
 * there are no host-side pdev tx queues to unpause */
static inline
void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
	return;
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL || QCA_LL_TX_FLOW_CONTROL_V2 */
477
478/**
479 * @brief Synchronize the data-path tx with a control-path target download
 * @details
481 * @param data_pdev - the data-path physical device object
482 * @param sync_cnt - after the host data-path SW downloads this sync request
483 * to the target data-path FW, the target tx data-path will hold itself
484 * in suspension until it is given an out-of-band sync counter value that
485 * is equal to or greater than this counter value
486 */
487void ol_txrx_tx_sync(ol_txrx_pdev_handle data_pdev, uint8_t sync_cnt);
488
489/**
490 * @brief Delete a peer's data object.
491 * @details
492 * When the host's control SW disassociates a peer, it calls this
493 * function to delete the peer's data object.
494 * The reference stored in the control peer object to the data peer
495 * object (set up by a call to ol_peer_store()) is provided.
496 *
497 * @param data_peer - the object to delete
498 */
499void ol_txrx_peer_detach(ol_txrx_peer_handle data_peer);
500
501typedef void (*ol_txrx_vdev_delete_cb)(void *context);
502
503/**
504 * @brief Deallocate the specified data virtual device object.
505 * @details
506 * All peers associated with the virtual device need to be deleted
507 * (ol_txrx_peer_detach) before the virtual device itself is deleted.
508 * However, for the peers to be fully deleted, the peer deletion has to
509 * percolate through the target data FW and back up to the host data SW.
510 * Thus, even though the host control SW may have issued a peer_detach
511 * call for each of the vdev's peers, the peer objects may still be
512 * allocated, pending removal of all references to them by the target FW.
513 * In this case, though the vdev_detach function call will still return
514 * immediately, the vdev itself won't actually be deleted, until the
515 * deletions of all its peers complete.
516 * The caller can provide a callback function pointer to be notified when
517 * the vdev deletion actually happens - whether it's directly within the
518 * vdev_detach call, or if it's deferred until all in-progress peer
519 * deletions have completed.
520 *
521 * @param data_vdev - data object for the virtual device in question
522 * @param callback - function to call (if non-NULL) once the vdev has
523 * been wholly deleted
524 * @param callback_context - context to provide in the callback
525 */
526void
527ol_txrx_vdev_detach(ol_txrx_vdev_handle data_vdev,
528 ol_txrx_vdev_delete_cb callback, void *callback_context);
529
530/**
531 * @brief Delete the data SW state.
532 * @details
533 * This function is used when the WLAN driver is being removed to
534 * remove the host data component within the driver.
535 * All virtual devices within the physical device need to be deleted
536 * (ol_txrx_vdev_detach) before the physical device itself is deleted.
537 *
538 * @param data_pdev - the data physical device object being removed
539 * @param force - delete the pdev (and its vdevs and peers) even if there
540 * are outstanding references by the target to the vdevs and peers
541 * within the pdev
542 */
543void ol_txrx_pdev_detach(ol_txrx_pdev_handle data_pdev, int force);
544
545typedef void
Nirav Shahcbc6d722016-03-01 16:24:53 +0530546(*ol_txrx_data_tx_cb)(void *ctxt, qdf_nbuf_t tx_frm, int had_error);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800547
548/**
549 * @brief Store a delivery notification callback for specific data frames.
550 * @details
551 * Through a non-std tx function, the txrx SW can be given tx data frames
552 * that are specially marked to not be unmapped and freed by the tx SW
553 * when transmission completes. Rather, these specially-marked frames
554 * are provided to the callback registered with this function.
555 *
556 * @param data_vdev - which vdev the callback is being registered with
557 * (Currently the callback is stored in the pdev rather than the vdev.)
558 * @param callback - the function to call when tx frames marked as "no free"
559 * are done being transmitted
560 * @param ctxt - the context argument provided to the callback function
561 */
562void
563ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle data_vdev,
564 ol_txrx_data_tx_cb callback, void *ctxt);
565
566/**
567 * @brief Allow the control-path SW to send data frames.
568 * @details
569 * Generally, all tx data frames come from the OS shim into the txrx layer.
570 * However, there are rare cases such as TDLS messaging where the UMAC
571 * control-path SW creates tx data frames.
572 * This UMAC SW can call this function to provide the tx data frames to
573 * the txrx layer.
574 * The UMAC SW can request a callback for these data frames after their
575 * transmission completes, by using the ol_txrx_data_tx_cb_set function
576 * to register a tx completion callback, and by specifying
577 * ol_tx_spec_no_free as the tx_spec arg when giving the frames to
578 * ol_tx_non_std.
579 * The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
580 * as specified by ol_cfg_frame_type().
581 *
582 * @param data_vdev - which vdev should transmit the tx data frames
583 * @param tx_spec - what non-standard handling to apply to the tx data frames
584 * @param msdu_list - NULL-terminated list of tx MSDUs
585 */
Nirav Shahcbc6d722016-03-01 16:24:53 +0530586qdf_nbuf_t
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800587ol_tx_non_std(ol_txrx_vdev_handle data_vdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +0530588 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800589
590typedef void
Nirav Shahcbc6d722016-03-01 16:24:53 +0530591(*ol_txrx_mgmt_tx_cb)(void *ctxt, qdf_nbuf_t tx_mgmt_frm, int had_error);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800592
593/**
594 * @brief Store a callback for delivery notifications for management frames.
595 * @details
596 * When the txrx SW receives notifications from the target that a tx frame
597 * has been delivered to its recipient, it will check if the tx frame
598 * is a management frame. If so, the txrx SW will check the management
599 * frame type specified when the frame was submitted for transmission.
 * If there is a callback function registered for the type of management
601 * frame in question, the txrx code will invoke the callback to inform
602 * the management + control SW that the mgmt frame was delivered.
603 * This function is used by the control SW to store a callback pointer
604 * for a given type of management frame.
605 *
606 * @param pdev - the data physical device object
607 * @param type - the type of mgmt frame the callback is used for
608 * @param download_cb - the callback for notification of delivery to the target
609 * @param ota_ack_cb - the callback for notification of delivery to the peer
610 * @param ctxt - context to use with the callback
611 */
612void
613ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
614 uint8_t type,
615 ol_txrx_mgmt_tx_cb download_cb,
616 ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt);
617
618/**
619 * @brief Transmit a management frame.
620 * @details
621 * Send the specified management frame from the specified virtual device.
622 * The type is used for determining whether to invoke a callback to inform
623 * the sender that the tx mgmt frame was delivered, and if so, which
624 * callback to use.
625 *
626 * @param vdev - virtual device transmitting the frame
627 * @param tx_mgmt_frm - management frame to transmit
 * @param type - the type of management frame (determines what callback to use)
629 * @param use_6mbps - specify whether management frame to transmit should use 6 Mbps
630 * rather than 1 Mbps min rate(for 5GHz band or P2P)
631 * @return
632 * 0 -> the frame is accepted for transmission, -OR-
633 * 1 -> the frame was not accepted
634 */
635int
636ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
Nirav Shahcbc6d722016-03-01 16:24:53 +0530637 qdf_nbuf_t tx_mgmt_frm,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800638 uint8_t type, uint8_t use_6mbps, uint16_t chanfreq);
639
640/**
641 * @brief Setup the monitor mode vap (vdev) for this pdev
642 * @details
643 * When a non-NULL vdev handle is registered as the monitor mode vdev, all
644 * packets received by the system are delivered to the OS stack on this
 * interface in 802.11 MPDU format. Only a single monitor mode interface
 * can be up at any time. When the vdev handle is set to NULL the monitor
 * mode delivery is stopped. This handle may either be a unique vdev
 * object that only receives monitor mode packets OR a pointer to a vdev
 * object that also receives non-monitor traffic. In the second case the
 * OS stack is responsible for delivering the two streams using appropriate
 * OS APIs
652 *
653 * @param pdev - the data physical device object
654 * @param vdev - the data virtual device object to deliver monitor mode
655 * packets on
656 * @return
 * 0 -> the monitor mode vap was successfully set up
658 * -1 -> Unable to setup monitor mode
659 */
660int
661ol_txrx_set_monitor_mode_vap(ol_txrx_pdev_handle pdev,
662 ol_txrx_vdev_handle vdev);
663
664/**
665 * @brief Setup the current operating channel of the device
666 * @details
667 * Mainly used when populating monitor mode status that requires the
668 * current operating channel
669 *
670 * @param pdev - the data physical device object
671 * @param chan_mhz - the channel frequency (mhz)
672 * packets on
673 * @return - void
674 */
675void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz);
676
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530677QDF_STATUS ol_txrx_bus_suspend(void);
678QDF_STATUS ol_txrx_bus_resume(void);
679QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800680
681/**
682 * @brief Get the number of pending transmit frames that are awaiting completion.
683 * @details
684 * Mainly used in clean up path to make sure all buffers have been free'ed
685 *
686 * @param pdev - the data physical device object
687 * @return - count of pending frames
688 */
689int ol_txrx_get_tx_pending(ol_txrx_pdev_handle pdev);
690
691/**
692 * @brief Discard all tx frames that are pending in txrx.
693 * @details
694 * Mainly used in clean up path to make sure all pending tx packets
695 * held by txrx are returned back to OS shim immediately.
696 *
697 * @param pdev - the data physical device object
698 * @return - void
699 */
700void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev);
701
702/**
703 * @brief set the safemode of the device
704 * @details
 * This flag is used to bypass the encrypt and decrypt processes when sending
 * and receiving packets. It works like open AUTH mode: HW will treat all
 * packets as non-encrypted frames because no key is installed. For rx
 * fragmented frames, it bypasses all the rx defragmentation.
709 *
710 * @param vdev - the data virtual device object
711 * @param val - the safemode state
712 * @return - void
713 */
714void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val);
715
716/**
717 * @brief set the privacy filter
718 * @details
719 * Rx related. Set the privacy filters. When rx packets, check
720 * the ether type, filter type and packet type
721 * to decide whether discard these packets.
722 *
723 * @param vdev - the data virtual device object
724 * @param filter - filters to be set
725 * @param num - the number of filters
726 * @return - void
727 */
728void
729ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
730 void *filter, uint32_t num);
731
732/**
733 * @brief configure the drop unencrypted frame flag
734 * @details
735 * Rx related. When set this flag, all the unencrypted frames
736 * received over a secure connection will be discarded
737 *
738 * @param vdev - the data virtual device object
739 * @param val - flag
740 * @return - void
741 */
742void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val);
743
/**
 * enum ol_txrx_peer_state - Peer authentication state
 *
 * Used by ol_txrx_peer_state_update() so the data SW can decide whether
 * to filter tx/rx data frames for the peer.
 *
 * @ol_txrx_peer_state_invalid: not a valid state
 * @ol_txrx_peer_state_disc: disconnected (initial state)
 * @ol_txrx_peer_state_conn: connected, authentication in progress
 * @ol_txrx_peer_state_auth: authentication successful
 */
enum ol_txrx_peer_state {
	ol_txrx_peer_state_invalid,
	ol_txrx_peer_state_disc,    /* initial state */
	ol_txrx_peer_state_conn,    /* authentication in progress */
	ol_txrx_peer_state_auth,    /* authentication successful */
};
750
751/**
752 * @brief specify the peer's authentication state
753 * @details
754 * Specify the peer's authentication state (none, connected, authenticated)
755 * to allow the data SW to determine whether to filter out invalid data frames.
756 * (In the "connected" state, where security is enabled, but authentication
757 * has not completed, tx and rx data frames other than EAPOL or WAPI should
758 * be discarded.)
759 * This function is only relevant for systems in which the tx and rx filtering
760 * are done in the host rather than in the target.
761 *
762 * @param data_peer - which peer has changed its state
763 * @param state - the new state of the peer
764 *
 * Return: QDF_STATUS
766 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530767QDF_STATUS
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800768ol_txrx_peer_state_update(ol_txrx_pdev_handle pdev, uint8_t *peer_addr,
769 enum ol_txrx_peer_state state);
770
771void
772ol_txrx_peer_keyinstalled_state_update(ol_txrx_peer_handle data_peer,
773 uint8_t val);
774
775#define ol_tx_addba_conf(data_peer, tid, status) /* no-op */
776
777/**
778 * @brief Find a txrx peer handle from the peer's MAC address
779 * @details
780 * The control SW typically uses the txrx peer handle to refer to the peer.
 * In unusual circumstances, if it is infeasible for the control SW to maintain
 * the txrx peer handle but it can maintain the peer's MAC address,
 * this function allows the peer handle to be retrieved, based on the peer's
 * MAC address.
785 * In cases where there are multiple peer objects with the same MAC address,
786 * it is undefined which such object is returned.
787 * This function does not increment the peer's reference count. Thus, it is
788 * only suitable for use as long as the control SW has assurance that it has
789 * not deleted the peer object, by calling ol_txrx_peer_detach.
790 *
791 * @param pdev - the data physical device object
792 * @param peer_mac_addr - MAC address of the peer in question
793 * @return handle to the txrx peer object
794 */
795ol_txrx_peer_handle
796ol_txrx_peer_find_by_addr(ol_txrx_pdev_handle pdev, uint8_t *peer_mac_addr);
797
798/**
799 * @brief Find a txrx peer handle from a peer's local ID
800 * @details
801 * The control SW typically uses the txrx peer handle to refer to the peer.
 * In unusual circumstances, if it is infeasible for the control SW to maintain
 * the txrx peer handle but it can maintain a small integer local peer ID,
 * this function allows the peer handle to be retrieved, based on the local
 * peer ID.
806 *
807 * @param pdev - the data physical device object
808 * @param local_peer_id - the ID txrx assigned locally to the peer in question
809 * @return handle to the txrx peer object
810 */
/*
 * Use #ifdef (not #if): the flag is tested with #ifdef elsewhere in this
 * header, and #if is a preprocessing error when the macro is defined with
 * no value (e.g. -DQCA_SUPPORT_TXRX_LOCAL_PEER_ID on the compile line).
 */
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(ol_txrx_pdev_handle pdev, uint8_t local_peer_id);
#else
#define ol_txrx_peer_find_by_local_id(pdev, local_peer_id) NULL
#endif
817
/**
 * struct ol_txrx_peer_stats_t - snapshot of a peer's tx/rx counters
 *
 * Frame and byte counts are kept separately for unicast, multicast,
 * and broadcast traffic, in both the tx and rx directions.
 * Filled in by ol_txrx_peer_stats_copy().
 */
struct ol_txrx_peer_stats_t {
	struct {
		/* number of frames transmitted, by delivery type */
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} frms;
		/* number of bytes transmitted, by delivery type */
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} bytes;
	} tx;
	struct {
		/* number of frames received, by delivery type */
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} frms;
		/* number of bytes received, by delivery type */
		struct {
			uint32_t ucast;
			uint32_t mcast;
			uint32_t bcast;
		} bytes;
	} rx;
};
844
845/**
846 * @brief Provide a snapshot of the txrx counters for the specified peer
847 * @details
848 * The txrx layer optionally maintains per-peer stats counters.
849 * This function provides the caller with a consistent snapshot of the
850 * txrx stats counters for the specified peer.
851 *
852 * @param pdev - the data physical device object
853 * @param peer - which peer's stats counters are requested
854 * @param stats - buffer for holding the stats counters snapshot
855 * @return success / failure status
856 */
857#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
858A_STATUS
859ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
860 ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats);
861#else
862#define ol_txrx_peer_stats_copy(pdev, peer, stats) A_ERROR /* failure */
863#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
864
/**
 * struct txrx_pdev_cfg_param_t - configuration parameters for a txrx pdev
 *
 * Assembled by the control SW and passed to ol_pdev_cfg_attach() to build
 * the data-path configuration context.
 */
struct txrx_pdev_cfg_param_t {
	/* full rx reorder offload enable flag
	 * (assumed: reorder handled entirely by the target -- confirm) */
	uint8_t is_full_reorder_offload;
	/* IPA Micro controller data path offload enable flag */
	uint8_t is_uc_offload_enabled;
	/* IPA Micro controller data path offload TX buffer count */
	uint32_t uc_tx_buffer_count;
	/* IPA Micro controller data path offload TX buffer size */
	uint32_t uc_tx_buffer_size;
	/* IPA Micro controller data path offload RX indication ring count */
	uint32_t uc_rx_indication_ring_count;
	/* IPA Micro controller data path offload TX partition base */
	uint32_t uc_tx_partition_base;
	/* IP, TCP and UDP checksum offload */
	bool ip_tcp_udp_checksum_offload;
	/* Rx processing in thread from TXRX */
	bool enable_rxthread;
	/* CE classification enabled through INI */
	bool ce_classify_enabled;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	/* Threshold to stop queue in percentage */
	uint32_t tx_flow_stop_queue_th;
	/* Start queue offset in percentage */
	uint32_t tx_flow_start_queue_offset;
#endif
};
891
892/**
893 * @brief Setup configuration parameters
894 * @details
895 * Allocation configuration context that will be used across data path
896 *
897 * @param osdev - OS handle needed as an argument for some OS primitives
898 * @return the control device object
899 */
Anurag Chouhan6d760662016-02-20 16:05:43 +0530900ol_pdev_handle ol_pdev_cfg_attach(qdf_device_t osdev,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800901 struct txrx_pdev_cfg_param_t cfg_param);
902
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530903QDF_STATUS ol_txrx_get_vdevid(struct ol_txrx_peer_t *peer, uint8_t *vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800904void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id);
905
906
907#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff
908#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
909uint16_t ol_txrx_local_peer_id(ol_txrx_peer_handle peer);
910ol_txrx_peer_handle ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
911 uint8_t *peer_addr,
912 uint8_t *peer_id);
913ol_txrx_peer_handle
914ol_txrx_find_peer_by_addr_and_vdev(ol_txrx_pdev_handle pdev,
915 ol_txrx_vdev_handle vdev,
916 uint8_t *peer_addr, uint8_t *peer_id);
917#else
918#define ol_txrx_local_peer_id(peer) OL_TXRX_INVALID_LOCAL_PEER_ID
919#define ol_txrx_find_peer_by_addr(pdev, peer_addr, peer_id) NULL
920#define ol_txrx_find_peer_by_addr_and_vdev(pdev, vdev, peer_addr, peer_id) NULL
921#endif
922
923#define OL_TXRX_RSSI_INVALID 0xffff
924/**
925 * @brief Provide the current RSSI average from data frames sent by a peer.
926 * @details
927 * If a peer has sent data frames, the data SW will optionally keep
928 * a running average of the RSSI observed for those data frames.
929 * This function returns that time-average RSSI if is it available,
930 * or OL_TXRX_RSSI_INVALID if either RSSI tracking is disabled or if
931 * no data frame indications with valid RSSI meta-data have been received.
932 * The RSSI is in approximate dBm units, and is normalized with respect
933 * to a 20 MHz channel. For example, if a data frame is received on a
934 * 40 MHz channel, wherein both the primary 20 MHz channel and the
935 * secondary 20 MHz channel have an RSSI of -77 dBm, the reported RSSI
936 * will be -77 dBm, rather than the actual -74 dBm RSSI from the
937 * combination of the primary + extension 20 MHz channels.
938 * Alternatively, the RSSI may be evaluated only on the primary 20 MHz
939 * channel.
940 *
941 * @param peer - which peer's RSSI is desired
942 * @return RSSI evaluted from frames sent by the specified peer
943 */
944#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
945int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer);
946#else
947#define ol_txrx_peer_rssi(peer) OL_TXRX_RSSI_INVALID
948#endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
949
/*
 * OL_TXRX_INVALID_LOCAL_PEER_ID and ol_txrx_local_peer_id() are already
 * defined/declared once above (see the QCA_SUPPORT_TXRX_LOCAL_PEER_ID
 * block); the duplicate definitions formerly repeated here -- which also
 * used the fragile "#if FLAG" form instead of "#ifdef" -- were removed.
 */
956
957#ifdef QCA_COMPUTE_TX_DELAY
958/**
959 * @brief updates the compute interval period for TSM stats.
960 * @details
961 * @param interval - interval for stats computation
962 */
963void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval);
964
965/**
966 * @brief Return the uplink (transmitted) packet count and loss count.
967 * @details
968 * This function will be called for getting uplink packet count and
969 * loss count for given stream (access category) a regular interval.
970 * This also resets the counters hence, the value returned is packets
971 * counted in last 5(default) second interval. These counter are
972 * incremented per access category in ol_tx_completion_handler()
973 *
974 * @param category - access category of interest
975 * @param out_packet_count - number of packets transmitted
976 * @param out_packet_loss_count - number of packets lost
977 */
978void
979ol_tx_packet_count(ol_txrx_pdev_handle pdev,
980 uint16_t *out_packet_count,
981 uint16_t *out_packet_loss_count, int category);
982#endif
983
984/**
985 * @brief Return the average delays for tx frames.
986 * @details
987 * Return the average of the total time tx frames spend within the driver
988 * and the average time tx frames take to be transmitted.
989 * These averages are computed over a 5 second time interval.
990 * These averages are computed separately for separate access categories,
991 * if the QCA_COMPUTE_TX_DELAY_PER_AC flag is set.
992 *
993 * @param pdev - the data physical device instance
994 * @param queue_delay_microsec - average time tx frms spend in the WLAN driver
995 * @param tx_delay_microsec - average time for frames to be transmitted
996 * @param category - category (TID) of interest
997 */
998#ifdef QCA_COMPUTE_TX_DELAY
999void
1000ol_tx_delay(ol_txrx_pdev_handle pdev,
1001 uint32_t *queue_delay_microsec,
1002 uint32_t *tx_delay_microsec, int category);
1003#else
1004static inline void
1005ol_tx_delay(ol_txrx_pdev_handle pdev,
1006 uint32_t *queue_delay_microsec,
1007 uint32_t *tx_delay_microsec, int category)
1008{
1009 /* no-op version if QCA_COMPUTE_TX_DELAY is not set */
1010 *queue_delay_microsec = *tx_delay_microsec = 0;
1011}
1012#endif
1013
1014/*
1015 * Bins used for reporting delay histogram:
1016 * bin 0: 0 - 10 ms delay
1017 * bin 1: 10 - 20 ms delay
1018 * bin 2: 20 - 40 ms delay
1019 * bin 3: 40 - 80 ms delay
1020 * bin 4: 80 - 160 ms delay
1021 * bin 5: > 160 ms delay
1022 */
1023#define QCA_TX_DELAY_HIST_REPORT_BINS 6
1024/**
1025 * @brief Provide a histogram of tx queuing delays.
1026 * @details
1027 * Return a histogram showing the number of tx frames of the specified
1028 * category for each of the delay levels in the histogram bin spacings
1029 * listed above.
1030 * These histograms are computed over a 5 second time interval.
1031 * These histograms are computed separately for separate access categories,
1032 * if the QCA_COMPUTE_TX_DELAY_PER_AC flag is set.
1033 *
1034 * @param pdev - the data physical device instance
1035 * @param bin_values - an array of QCA_TX_DELAY_HIST_REPORT_BINS elements
1036 * This array gets filled in with the histogram bin counts.
1037 * @param category - category (TID) of interest
1038 */
1039#ifdef QCA_COMPUTE_TX_DELAY
1040void
1041ol_tx_delay_hist(ol_txrx_pdev_handle pdev, uint16_t *bin_values, int category);
1042#else
1043static inline void
1044ol_tx_delay_hist(ol_txrx_pdev_handle pdev, uint16_t *bin_values, int category)
1045{
1046 /* no-op version if QCA_COMPUTE_TX_DELAY is not set */
Anurag Chouhanc5548422016-02-24 18:33:27 +05301047 qdf_assert(bin_values);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301048 qdf_mem_zero(bin_values,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001049 QCA_TX_DELAY_HIST_REPORT_BINS * sizeof(*bin_values));
1050}
1051#endif
1052
1053#if defined(QCA_SUPPORT_TX_THROTTLE)
1054/**
 * @brief Set the thermal mitigation throttling level.
 * @details
 *  This function applies only to LL systems. This function is used to set
 *  the tx throttle level used for thermal mitigation
 *
 * @param pdev - the physical device being throttled
 * @param level - the throttle level to apply
1061 */
1062void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level);
1063#else
static inline void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev,
					    int level)
{
	/* stub: tx throttling requires QCA_SUPPORT_TX_THROTTLE */
}
1069#endif /* QCA_SUPPORT_TX_THROTTLE */
1070
1071#if defined(QCA_SUPPORT_TX_THROTTLE)
1072/**
 * @brief Configure the thermal mitigation throttling period.
 * @details
 *  This function applies only to LL systems. This function is used to set
 *  the period over which data will be throttled
 *
 * @param pdev - the physical device being throttled
 * @param period - the throttling period
1079 */
1080void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period);
1081#else
static inline void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev,
					      int period)
{
	/* stub: tx throttling requires QCA_SUPPORT_TX_THROTTLE */
}
1087#endif /* QCA_SUPPORT_TX_THROTTLE */
1088
1089void ol_vdev_rx_set_intrabss_fwd(ol_txrx_vdev_handle vdev, bool val);
1090
1091
1092#ifdef IPA_OFFLOAD
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001093void
1094ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301095 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001096 uint32_t *ce_sr_ring_size,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301097 qdf_dma_addr_t *ce_reg_paddr,
1098 qdf_dma_addr_t *tx_comp_ring_base_paddr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001099 uint32_t *tx_comp_ring_size,
1100 uint32_t *tx_num_alloc_buffer,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301101 qdf_dma_addr_t *rx_rdy_ring_base_paddr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001102 uint32_t *rx_rdy_ring_size,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301103 qdf_dma_addr_t *rx_proc_done_idx_paddr,
Leo Chang8e073612015-11-13 10:55:34 -08001104 void **rx_proc_done_idx_vaddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301105 qdf_dma_addr_t *rx2_rdy_ring_base_paddr,
Leo Chang8e073612015-11-13 10:55:34 -08001106 uint32_t *rx2_rdy_ring_size,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301107 qdf_dma_addr_t *rx2_proc_done_idx_paddr,
Leo Chang8e073612015-11-13 10:55:34 -08001108 void **rx2_proc_done_idx_vaddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001109
Leo Chang8e073612015-11-13 10:55:34 -08001110
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001111void
1112ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301113 qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
1114 qdf_dma_addr_t ipa_rx_uc_doorbell_paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001115
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001116void
1117ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev, bool uc_active, bool is_tx);
1118
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001119void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg);
1120
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001121void ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
1122 void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
1123 void *osif_ctxt),
1124 void *osif_dev);
1125
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001126void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev);
1127#else
Leo Chang8e073612015-11-13 10:55:34 -08001128/**
1129 * ol_txrx_ipa_uc_get_resource() - Client request resource information
1130 * @pdev: handle to the HTT instance
1131 * @ce_sr_base_paddr: copy engine source ring base physical address
1132 * @ce_sr_ring_size: copy engine source ring size
1133 * @ce_reg_paddr: copy engine register physical address
1134 * @tx_comp_ring_base_paddr: tx comp ring base physical address
1135 * @tx_comp_ring_size: tx comp ring size
1136 * @tx_num_alloc_buffer: number of allocated tx buffer
1137 * @rx_rdy_ring_base_paddr: rx ready ring base physical address
1138 * @rx_rdy_ring_size: rx ready ring size
1139 * @rx_proc_done_idx_paddr: rx process done index physical address
1140 * @rx_proc_done_idx_vaddr: rx process done index virtual address
1141 * @rx2_rdy_ring_base_paddr: rx done ring base physical address
1142 * @rx2_rdy_ring_size: rx done ring size
1143 * @rx2_proc_done_idx_paddr: rx done index physical address
1144 * @rx2_proc_done_idx_vaddr: rx done index virtual address
1145 *
 * OL client will request IPA UC related resource information
 * Resource information will be distributed to IPA module
1148 * All of the required resources should be pre-allocated
1149 *
1150 * Return: none
1151 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001152static inline void
1153ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301154 qdf_dma_addr_t *ce_sr_base_paddr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001155 uint32_t *ce_sr_ring_size,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301156 qdf_dma_addr_t *ce_reg_paddr,
1157 qdf_dma_addr_t *tx_comp_ring_base_paddr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001158 uint32_t *tx_comp_ring_size,
1159 uint32_t *tx_num_alloc_buffer,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301160 qdf_dma_addr_t *rx_rdy_ring_base_paddr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001161 uint32_t *rx_rdy_ring_size,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301162 qdf_dma_addr_t *rx_proc_done_idx_paddr,
Leo Chang8e073612015-11-13 10:55:34 -08001163 void **rx_proc_done_idx_vaddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301164 qdf_dma_addr_t *rx2_rdy_ring_base_paddr,
Leo Chang8e073612015-11-13 10:55:34 -08001165 uint32_t *rx2_rdy_ring_size,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301166 qdf_dma_addr_t *rx2_proc_done_idx_paddr,
Leo Chang8e073612015-11-13 10:55:34 -08001167 void **rx2_proc_done_idx_vaddr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001168{
1169 return;
1170}
1171
Leo Chang8e073612015-11-13 10:55:34 -08001172/**
1173 * ol_txrx_ipa_uc_set_doorbell_paddr() - Client set IPA UC doorbell register
1174 * @pdev: handle to the HTT instance
1175 * @ipa_uc_tx_doorbell_paddr: tx comp doorbell physical address
1176 * @ipa_uc_rx_doorbell_paddr: rx ready doorbell physical address
1177 *
1178 * IPA UC let know doorbell register physical address
1179 * WLAN firmware will use this physical address to notify IPA UC
1180 *
1181 * Return: none
1182 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001183static inline void
1184ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
Anurag Chouhan6d760662016-02-20 16:05:43 +05301185 qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
1186 qdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001187{
1188 return;
1189}
1190
/**
 * ol_txrx_ipa_uc_set_active() - Client notify IPA UC data path active or not
 * @pdev: handle to the HTT instance
 * @uc_active: whether the micro controller data path is active
 * @is_tx: true refers to the tx path, false to the rx path
 *
 * Notifies the WLAN driver that the IPA micro controller data path has
 * become active or inactive for the given direction.
 *
 * Return: none
 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001202static inline void
1203ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev,
1204 bool uc_active, bool is_tx)
1205{
1206 return;
1207}
1208
Leo Chang8e073612015-11-13 10:55:34 -08001209/**
1210 * ol_txrx_ipa_uc_op_response() - Handle OP command response from firmware
1211 * @pdev: handle to the HTT instance
1212 * @op_msg: op response message from firmware
1213 *
1214 * Return: none
1215 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001216static inline void
1217ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg)
1218{
1219 return;
1220}
1221
Leo Chang8e073612015-11-13 10:55:34 -08001222/**
1223 * ol_txrx_ipa_uc_register_op_cb() - Register OP handler function
1224 * @pdev: handle to the HTT instance
1225 * @op_cb: handler function pointer
1226 * @osif_dev: register client context
1227 *
1228 * Return: none
1229 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001230static inline void
1231ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
1232 void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
1233 void *osif_ctxt),
1234 void *osif_dev)
1235{
1236 return;
1237}
1238
Leo Chang8e073612015-11-13 10:55:34 -08001239/**
1240 * ol_txrx_ipa_uc_get_stat() - Get firmware wdi status
1241 * @pdev: handle to the HTT instance
1242 *
1243 * Return: none
1244 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001245static inline void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev)
1246{
1247 return;
1248}
1249#endif /* IPA_OFFLOAD */
1250
1251void ol_txrx_display_stats(uint16_t bitmap);
1252void ol_txrx_clear_stats(uint16_t bitmap);
1253int ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned buf_len);
1254
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301255QDF_STATUS ol_txrx_register_ocb_peer(void *cds_ctx, uint8_t *mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001256 uint8_t *peer_id);
1257
1258void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
1259 struct ol_txrx_peer_t *peer);
1260
1261bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
1262 struct ol_txrx_peer_t **peer);
1263
Nirav Shah22bf44d2015-12-10 15:39:48 +05301264void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value);
1265uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void);
1266
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001267/* TX FLOW Control related functions */
1268#ifdef QCA_LL_TX_FLOW_CONTROL_V2
1269#define TX_FLOW_MGMT_POOL_ID 0xEF
1270
1271#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
1272#define TX_FLOW_MGMT_POOL_SIZE 32
1273#else
1274#define TX_FLOW_MGMT_POOL_SIZE 0
1275#endif
1276
1277void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev);
1278void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev);
1279void ol_tx_dump_flow_pool_info(void);
1280void ol_tx_clear_flow_pool_stats(void);
1281void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
1282 uint8_t flow_pool_id, uint16_t flow_pool_size);
1283void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
1284 uint8_t flow_pool_id);
1285struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
1286 uint16_t flow_pool_size);
1287int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool);
1288void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);
1289#else
1290
static inline void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
{
	/* stub: tx flow control requires QCA_LL_TX_FLOW_CONTROL_V2 */
}
static inline void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
{
	/* stub: tx flow control requires QCA_LL_TX_FLOW_CONTROL_V2 */
}
static inline void ol_tx_dump_flow_pool_info(void)
{
	/* stub: no flow pools exist without QCA_LL_TX_FLOW_CONTROL_V2 */
}
static inline void ol_tx_clear_flow_pool_stats(void)
{
	/* stub: no flow pool stats exist without QCA_LL_TX_FLOW_CONTROL_V2 */
}
static inline void ol_tx_flow_pool_map_handler(uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	/* stub: flow pool map events ignored without flow control v2 */
}
static inline void ol_tx_flow_pool_unmap_handler(uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id)
{
	/* stub: flow pool unmap events ignored without flow control v2 */
}
static inline struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(
	uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	/* stub: no pool can be created without flow control v2 */
	return NULL;
}
static inline int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool)
{
	/* stub: nothing to delete without flow control v2; report success */
	return 0;
}
static inline void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
{
	/* stub: global descriptor pool sizing requires flow control v2 */
}
1330#endif
1331
1332#endif /* _OL_TXRX_CTRL_API__H_ */