/*
 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file ol_tx_queue.h
 * @brief API definitions for the tx frame queue module within the data SW.
 */
#ifndef _OL_TX_QUEUE__H_
#define _OL_TX_QUEUE__H_

#include <qdf_nbuf.h>		/* qdf_nbuf_t */
#include <cdp_txrx_cmn.h>	/* ol_txrx_vdev_t, etc. */
#include <qdf_types.h>		/* bool */

/*--- function prototypes for optional queue log feature --------------------*/
#if defined(ENABLE_TX_QUEUE_LOG) || \
	(defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT))

/**
 * ol_tx_queue_log_enqueue() - log the enqueue of tx frames
 * @pdev: physical device object
 * @msdu_info: tx msdu meta data
 * @frms: number of frames for which logs need to be enqueued
 * @bytes: number of bytes
 *
 * Return: None
 */
void
ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
			struct ol_txrx_msdu_info_t *msdu_info,
			int frms, int bytes);

/**
 * ol_tx_queue_log_dequeue() - log the dequeue of tx frames
 * @pdev: physical device object
 * @txq: tx queue
 * @frms: number of frames for which logs need to be dequeued
 * @bytes: number of bytes
 *
 * Return: None
 */
void
ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
			struct ol_tx_frms_queue_t *txq, int frms, int bytes);

/**
 * ol_tx_queue_log_free() - log the freeing of queued tx frames
 * @pdev: physical device object
 * @txq: tx queue
 * @tid: tid value
 * @frms: number of frames for which logs need to be freed
 * @bytes: number of bytes
 * @is_peer_txq: true if @txq is a per-peer queue, false otherwise
 *
 * Return: None
 */
void
ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
		     struct ol_tx_frms_queue_t *txq,
		     int tid, int frms, int bytes, bool is_peer_txq);

#else

/* Queue logging disabled: the loggers compile to empty inline stubs. */

static inline void
ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
			struct ol_txrx_msdu_info_t *msdu_info,
			int frms, int bytes)
{
}

static inline void
ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
			struct ol_tx_frms_queue_t *txq, int frms, int bytes)
{
}

static inline void
ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
		     struct ol_tx_frms_queue_t *txq,
		     int tid, int frms, int bytes, bool is_peer_txq)
{
}

#endif
103
104#if defined(CONFIG_HL_SUPPORT)
105
106/**
107 * @brief Queue a tx frame to the tid queue.
108 *
109 * @param pdev - the data virtual device sending the data
110 * (for storing the tx desc in the virtual dev's tx_target_list,
111 * and for accessing the phy dev)
112 * @param txq - which queue the tx frame gets stored in
113 * @param tx_desc - tx meta-data, including prev and next ptrs
114 * @param tx_msdu_info - characteristics of the tx frame
115 */
116void
117ol_tx_enqueue(
118 struct ol_txrx_pdev_t *pdev,
119 struct ol_tx_frms_queue_t *txq,
120 struct ol_tx_desc_t *tx_desc,
121 struct ol_txrx_msdu_info_t *tx_msdu_info);
122
123/**
124 * @brief - remove the specified number of frames from the head of a tx queue
125 * @details
126 * This function removes frames from the head of a tx queue,
127 * and returns them as a NULL-terminated linked list.
128 * The function will remove frames until one of the following happens:
129 * 1. The tx queue is empty
130 * 2. The specified number of frames have been removed
131 * 3. Removal of more frames would exceed the specified credit limit
132 *
133 * @param pdev - the physical device object
134 * @param txq - which tx queue to remove frames from
135 * @param head - which contains return linked-list of tx frames (descriptors)
136 * @param num_frames - maximum number of frames to remove
137 * @param[in/out] credit -
138 * input: max credit the dequeued frames can consume
139 * output: how much credit the dequeued frames consume
140 * @param[out] bytes - the sum of the sizes of the dequeued frames
141 * @return number of frames dequeued
Yun Parkd79331a2017-04-06 22:18:36 -0700142 */
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530143u_int16_t
144ol_tx_dequeue(
145 struct ol_txrx_pdev_t *pdev,
146 struct ol_tx_frms_queue_t *txq,
147 ol_tx_desc_list *head,
148 u_int16_t num_frames,
149 u_int32_t *credit,
150 int *bytes);
151
152/**
153 * @brief - free all of frames from the tx queue while deletion
154 * @details
155 * This function frees all of frames from the tx queue.
156 * This function is called during peer or vdev deletion.
157 * This function notifies the scheduler, so the scheduler can update
158 * its state to account for the absence of the queue.
159 *
160 * @param pdev - the physical device object, which stores the txqs
161 * @param txq - which tx queue to free frames from
162 * @param tid - the extended TID that the queue belongs to
Poddar, Siddarth74178df2016-08-09 17:32:50 +0530163 * @param is_peer_txq - peer queue or not
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530164 */
165void
166ol_tx_queue_free(
167 struct ol_txrx_pdev_t *pdev,
168 struct ol_tx_frms_queue_t *txq,
Poddar, Siddarth74178df2016-08-09 17:32:50 +0530169 int tid, bool is_peer_txq);
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530170
171/**
172 * @brief - discard pending tx frames from the tx queue
173 * @details
174 * This function is called if there are too many queues in tx scheduler.
175 * This function is called if we wants to flush all pending tx
176 * queues in tx scheduler.
177 *
178 * @param pdev - the physical device object, which stores the txqs
179 * @param flush_all - flush all pending tx queues if set to true
Yun Parkd79331a2017-04-06 22:18:36 -0700180 * @param tx_descs - List Of tx_descs to be discarded will be returned by this
181 * function
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530182 */
183
184void
185ol_tx_queue_discard(
186 struct ol_txrx_pdev_t *pdev,
187 bool flush_all,
188 ol_tx_desc_list *tx_descs);
189
190#else
191
192static inline void
193ol_tx_enqueue(
194 struct ol_txrx_pdev_t *pdev,
195 struct ol_tx_frms_queue_t *txq,
196 struct ol_tx_desc_t *tx_desc,
197 struct ol_txrx_msdu_info_t *tx_msdu_info)
198{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530199}
200
201static inline u_int16_t
202ol_tx_dequeue(
203 struct ol_txrx_pdev_t *pdev,
204 struct ol_tx_frms_queue_t *txq,
205 ol_tx_desc_list *head,
206 u_int16_t num_frames,
207 u_int32_t *credit,
208 int *bytes)
209{
210 return 0;
211}
212
213static inline void
214ol_tx_queue_free(
215 struct ol_txrx_pdev_t *pdev,
216 struct ol_tx_frms_queue_t *txq,
Poddar, Siddarth74178df2016-08-09 17:32:50 +0530217 int tid, bool is_peer_txq)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530218{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530219}
220
221static inline void
222ol_tx_queue_discard(
223 struct ol_txrx_pdev_t *pdev,
224 bool flush_all,
225 ol_tx_desc_list *tx_descs)
226{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530227}
228#endif /* defined(CONFIG_HL_SUPPORT) */
229
#if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
/* No flow control and no HL support: flushing a vdev's tx is a no-op. */
static inline
void ol_txrx_vdev_flush(struct cdp_vdev *pvdev)
{
}
#else
/**
 * ol_txrx_vdev_flush() - flush pending tx for a vdev
 * @pvdev: the virtual device object
 *
 * Return: None
 */
void ol_txrx_vdev_flush(struct cdp_vdev *pvdev);
#endif
Jeff Johnsonb13a5012016-12-21 08:41:16 -0800238
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
	(defined(QCA_LL_TX_FLOW_CONTROL_V2)) || \
	defined(CONFIG_HL_SUPPORT)
/**
 * ol_txrx_vdev_pause() - pause tx for a vdev
 * @pvdev: the virtual device object
 * @reason: pause reason code
 *
 * Return: None
 */
void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason);

/**
 * ol_txrx_vdev_unpause() - unpause tx for a vdev
 * @pvdev: the virtual device object
 * @reason: unpause reason code
 *
 * Return: None
 */
void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason);
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL || QCA_LL_TX_FLOW_CONTROL_V2 ||
	* CONFIG_HL_SUPPORT
	*/
245
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530246#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
247
248void
249ol_txrx_peer_bal_add_limit_peer(
250 struct ol_txrx_pdev_t *pdev,
251 u_int16_t peer_id,
252 u_int16_t peer_limit);
253
254void
255ol_txrx_peer_bal_remove_limit_peer(
256 struct ol_txrx_pdev_t *pdev,
257 u_int16_t peer_id);
258
259/**
260 * ol_txrx_peer_pause_but_no_mgmt_q() - suspend/pause all txqs except
261 * management queue for a given peer
262 * @peer: peer device object
263 *
264 * Return: None
265 */
266void
267ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer);
268
269/**
270 * ol_txrx_peer_unpause_but_no_mgmt_q() - unpause all txqs except management
271 * queue for a given peer
272 * @peer: peer device object
273 *
274 * Return: None
275 */
276void
277ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer);
278
279/**
280 * ol_tx_bad_peer_dequeue_check() - retrieve the send limit
281 * of the tx queue category
282 * @txq: tx queue of the head of the category list
283 * @max_frames: send limit of the txq category
284 * @tx_limit_flag: set true is tx limit is reached
285 *
286 * Return: send limit
287 */
288u_int16_t
289ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
290 u_int16_t max_frames,
291 u_int16_t *tx_limit_flag);
292
293/**
294 * ol_tx_bad_peer_update_tx_limit() - update the send limit of the
295 * tx queue category
296 * @pdev: the physical device object
297 * @txq: tx queue of the head of the category list
298 * @frames: frames that has been dequeued
299 * @tx_limit_flag: tx limit reached flag
300 *
301 * Return: None
302 */
303void
304ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
305 struct ol_tx_frms_queue_t *txq,
306 u_int16_t frames,
307 u_int16_t tx_limit_flag);
308
309/**
310 * ol_txrx_set_txq_peer() - set peer to the tx queue's peer
311 * @txq: tx queue for a given tid
312 * @peer: the peer device object
313 *
314 * Return: None
315 */
316void
317ol_txrx_set_txq_peer(
318 struct ol_tx_frms_queue_t *txq,
319 struct ol_txrx_peer_t *peer);
320
321/**
322 * @brief - initialize the peer balance context
323 * @param pdev - the physical device object, which stores the txqs
324 */
325void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev);
326
327/**
328 * @brief - deinitialize the peer balance context
329 * @param pdev - the physical device object, which stores the txqs
330 */
331void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev);
332
333#else
334
335static inline void ol_txrx_peer_bal_add_limit_peer(
336 struct ol_txrx_pdev_t *pdev,
337 u_int16_t peer_id,
338 u_int16_t peer_limit)
339{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530340}
341
342static inline void ol_txrx_peer_bal_remove_limit_peer(
343 struct ol_txrx_pdev_t *pdev,
344 u_int16_t peer_id)
345{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530346}
347
348static inline void ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer)
349{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530350}
351
352static inline void ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer)
353{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530354}
355
356static inline u_int16_t
357ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
358 u_int16_t max_frames,
359 u_int16_t *tx_limit_flag)
360{
361 /* just return max_frames */
362 return max_frames;
363}
364
365static inline void
366ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
367 struct ol_tx_frms_queue_t *txq,
368 u_int16_t frames,
369 u_int16_t tx_limit_flag)
370{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530371}
372
373static inline void
374ol_txrx_set_txq_peer(
375 struct ol_tx_frms_queue_t *txq,
376 struct ol_txrx_peer_t *peer)
377{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530378}
379
380static inline void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev)
381{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530382}
383
384static inline void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev)
385{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530386}
387
388#endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
389
#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)

/**
 * ol_tx_queue_log_sched() - start logging of tx queues for HL
 * @pdev: physical device object
 * @credit: number of credits
 * @num_active_tids: number of active tids for which logging needs to be done
 * @active_bitmap: bitmap of active tids
 * @data: buffer
 *
 * Return: None
 */
void
ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
		      int credit,
		      int *num_active_tids,
		      uint32_t **active_bitmap, uint8_t **data);
#else

/* Scheduler logging disabled: no-op stub. */
static inline void
ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
		      int credit,
		      int *num_active_tids,
		      uint32_t **active_bitmap, uint8_t **data)
{
}
#endif /* defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING) */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800417
#if defined(CONFIG_HL_SUPPORT) && TXRX_DEBUG_LEVEL > 5
/**
 * ol_tx_queues_display() - show the current state of all tx queues
 * @pdev: the physical device object, which stores the txqs
 *
 * Return: None
 */
void
ol_tx_queues_display(struct ol_txrx_pdev_t *pdev);

#else

/* Debug display disabled: no-op stub. */
static inline void
ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
{
}
#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800433
/* ol_tx_queue_decs_reinit() is intentionally a no-op in this configuration */
#define ol_tx_queue_decs_reinit(peer, peer_id) /* no-op */

#ifdef QCA_SUPPORT_TX_THROTTLE
/**
 * ol_tx_throttle_set_level() - set the tx throttle level
 * @ppdev: the physical device object
 * @level: throttle level to apply
 *
 * Return: None
 */
void ol_tx_throttle_set_level(struct cdp_pdev *ppdev, int level);

/**
 * ol_tx_throttle_init_period() - initialize the tx throttle period
 * @ppdev: the physical device object
 * @period: throttle period (units defined by the implementation)
 * @dutycycle_level: duty cycle level table
 *
 * Return: None
 */
void ol_tx_throttle_init_period(struct cdp_pdev *ppdev, int period,
				uint8_t *dutycycle_level);

/**
 * ol_tx_throttle_init() - initialize the throttle context
 * @pdev: the physical device object, which stores the txqs
 *
 * Return: None
 */
void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev);
#else
/* Tx throttling disabled: all throttle operations are no-op stubs. */
static inline void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev) {}

static inline void ol_tx_throttle_set_level(struct cdp_pdev *ppdev, int level)
{}

static inline void ol_tx_throttle_init_period(struct cdp_pdev *ppdev,
					      int period,
					      uint8_t *dutycycle_level)
{}
#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530457
458#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
459
460static inline bool
461ol_tx_is_txq_last_serviced_queue(struct ol_txrx_pdev_t *pdev,
462 struct ol_tx_frms_queue_t *txq)
463{
464 return txq == pdev->tx_sched.last_used_txq;
465}
466
467/**
468 * ol_tx_txq_group_credit_limit() - check for credit limit of a given tx queue
469 * @pdev: physical device object
470 * @txq: tx queue for which credit limit needs be to checked
471 * @credit: number of credits of the selected category
472 *
473 * Return: updated credits
474 */
475u_int32_t ol_tx_txq_group_credit_limit(
476 struct ol_txrx_pdev_t *pdev,
477 struct ol_tx_frms_queue_t *txq,
478 u_int32_t credit);
479
480/**
481 * ol_tx_txq_group_credit_update() - update group credits of the
482 * selected catoegory
483 * @pdev: physical device object
484 * @txq: tx queue for which credit needs to be updated
485 * @credit: number of credits by which selected category needs to be updated
486 * @absolute: TXQ group absolute value
487 *
488 * Return: None
489 */
490void ol_tx_txq_group_credit_update(
491 struct ol_txrx_pdev_t *pdev,
492 struct ol_tx_frms_queue_t *txq,
493 int32_t credit,
494 u_int8_t absolute);
495
496/**
497 * ol_tx_set_vdev_group_ptr() - update vdev queues group pointer
498 * @pdev: physical device object
499 * @vdev_id: vdev id for which group pointer needs to update
500 * @grp_ptr: pointer to ol tx queue group which needs to be set for vdev queues
501 *
502 * Return: None
503 */
504void
505ol_tx_set_vdev_group_ptr(
506 ol_txrx_pdev_handle pdev,
507 u_int8_t vdev_id,
508 struct ol_tx_queue_group_t *grp_ptr);
509
510/**
511 * ol_tx_txq_set_group_ptr() - update tx queue group pointer
512 * @txq: tx queue of which group pointer needs to update
513 * @grp_ptr: pointer to ol tx queue group which needs to be
514 * set for given tx queue
515 *
516 *
517 * Return: None
518 */
519void
520ol_tx_txq_set_group_ptr(
521 struct ol_tx_frms_queue_t *txq,
522 struct ol_tx_queue_group_t *grp_ptr);
523
524/**
525 * ol_tx_set_peer_group_ptr() - update peer tx queues group pointer
526 * for a given tid
527 * @pdev: physical device object
528 * @peer: peer device object
529 * @vdev_id: vdev id
530 * @tid: tid for which group pointer needs to update
531 *
532 *
533 * Return: None
534 */
535void
536ol_tx_set_peer_group_ptr(
537 ol_txrx_pdev_handle pdev,
538 struct ol_txrx_peer_t *peer,
539 u_int8_t vdev_id,
540 u_int8_t tid);
541#else
542
543static inline bool
544ol_tx_is_txq_last_serviced_queue(struct ol_txrx_pdev_t *pdev,
545 struct ol_tx_frms_queue_t *txq)
546{
547 return 0;
548}
549
550static inline
551u_int32_t ol_tx_txq_group_credit_limit(
552 struct ol_txrx_pdev_t *pdev,
553 struct ol_tx_frms_queue_t *txq,
554 u_int32_t credit)
555{
556 return credit;
557}
558
559static inline void ol_tx_txq_group_credit_update(
560 struct ol_txrx_pdev_t *pdev,
561 struct ol_tx_frms_queue_t *txq,
562 int32_t credit,
563 u_int8_t absolute)
564{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530565}
566
567static inline void
568ol_tx_txq_set_group_ptr(
569 struct ol_tx_frms_queue_t *txq,
570 struct ol_tx_queue_group_t *grp_ptr)
571{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530572}
573
574static inline void
575ol_tx_set_peer_group_ptr(
576 ol_txrx_pdev_handle pdev,
577 struct ol_txrx_peer_t *peer,
578 u_int8_t vdev_id,
579 u_int8_t tid)
580{
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530581}
582#endif

#endif /* _OL_TX_QUEUE__H_ */