/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DP_TX_DESC_H
#define DP_TX_DESC_H

#include "dp_types.h"
#include "dp_tx.h"
#include "dp_internal.h"

/**
 * 21 bits cookie
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 */
/* ???Ring ID needed??? */
#define DP_TX_DESC_ID_POOL_MASK		0x018000
#define DP_TX_DESC_ID_POOL_OS		15
#define DP_TX_DESC_ID_PAGE_MASK		0x007FE0
#define DP_TX_DESC_ID_PAGE_OS		5
#define DP_TX_DESC_ID_OFFSET_MASK	0x00001F
#define DP_TX_DESC_ID_OFFSET_OS		0
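
/*
 * Worked example (illustrative only, not part of the driver API): unpacking
 * a hypothetical descriptor ID of 0xA345 with the masks above yields
 * pool_id 1, page_id 282 and offset 5:
 *
 *	uint32_t id = 0xA345;
 *	uint8_t pool_id = (id & DP_TX_DESC_ID_POOL_MASK) >>
 *				DP_TX_DESC_ID_POOL_OS;
 *	uint16_t page_id = (id & DP_TX_DESC_ID_PAGE_MASK) >>
 *				DP_TX_DESC_ID_PAGE_OS;
 *	uint16_t offset = (id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *				DP_TX_DESC_ID_OFFSET_OS;
 */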

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
do { \
	(_tx_desc_pool)->elem_size = 0; \
	(_tx_desc_pool)->freelist = NULL; \
	(_tx_desc_pool)->pool_size = 0; \
	(_tx_desc_pool)->avail_desc = 0; \
	(_tx_desc_pool)->start_th = 0; \
	(_tx_desc_pool)->stop_th = 0; \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE; \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define TX_DESC_LOCK_CREATE(lock) qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock) qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock) qdf_spin_unlock_bh(lock)
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
do { \
	(_tx_desc_pool)->elem_size = 0; \
	(_tx_desc_pool)->num_allocated = 0; \
	(_tx_desc_pool)->freelist = NULL; \
	(_tx_desc_pool)->elem_count = 0; \
	(_tx_desc_pool)->num_free = 0; \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#define MAX_POOL_BUFF_COUNT 10000

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				     uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
					uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);
void dp_tx_flow_control_deinit(struct dp_soc *);

QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
				     tx_pause_callback pause_cb);
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size);

QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);

/**
 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
 * @pool: flow pool
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: tx descriptor
 */
static inline
struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
{
	struct dp_tx_desc_s *tx_desc = pool->freelist;

	pool->freelist = pool->freelist->next;
	pool->avail_desc--;
	return tx_desc;
}

/**
 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
 * @pool: flow pool
 * @tx_desc: tx descriptor
 *
 * Caller needs to take lock and do sanity checks.
 *
 * Return: none
 */
static inline
void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
			      struct dp_tx_desc_s *tx_desc)
{
	tx_desc->next = pool->freelist;
	pool->freelist = tx_desc;
	pool->avail_desc++;
}

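/*
 * Usage sketch (illustrative only): both helpers above assume the caller
 * already holds the pool lock and has checked that the pool is usable:
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc) {
 *		struct dp_tx_desc_s *desc = dp_tx_get_desc_flow_pool(pool);
 *
 *		// ... hand desc to the TX path, or return it: ...
 *		dp_tx_put_desc_flow_pool(pool, desc);
 *	}
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */
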
#ifdef QCA_AC_BASED_FLOW_CONTROL

/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 * @pool: flow pool
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}

/**
 * dp_tx_is_threshold_reached() - Check if the available descriptor count
 * has hit one of the per-AC stop thresholds
 * @pool: flow pool
 * @avail_desc: available descriptor number
 *
 * Return: true if a threshold is met, false if not
 */
static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
		return true;
	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	bool is_pause = false;
	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;

	if (qdf_likely(pool)) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (qdf_likely(pool->avail_desc)) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			is_pause = dp_tx_is_threshold_reached(pool,
							      pool->avail_desc);

			if (qdf_unlikely(is_pause)) {
				switch (pool->status) {
				case FLOW_POOL_ACTIVE_UNPAUSED:
					/* pause network BE/BK queue */
					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
					level = DP_TH_BE_BK;
					pool->status = FLOW_POOL_BE_BK_PAUSED;
					break;
				case FLOW_POOL_BE_BK_PAUSED:
					/* pause network VI queue */
					act = WLAN_NETIF_VI_QUEUE_OFF;
					level = DP_TH_VI;
					pool->status = FLOW_POOL_VI_PAUSED;
					break;
				case FLOW_POOL_VI_PAUSED:
					/* pause network VO queue */
					act = WLAN_NETIF_VO_QUEUE_OFF;
					level = DP_TH_VO;
					pool->status = FLOW_POOL_VO_PAUSED;
					break;
				case FLOW_POOL_VO_PAUSED:
					/* pause network HI PRI queue */
					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
					level = DP_TH_HI;
					pool->status = FLOW_POOL_ACTIVE_PAUSED;
					break;
				case FLOW_POOL_ACTIVE_PAUSED:
					act = WLAN_NETIF_ACTION_TYPE_NONE;
					break;
				default:
					dp_err_rl("pool status is %d!",
						  pool->status);
					break;
				}

				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
					pool->latest_pause_time[level] =
						qdf_get_system_timestamp();
					soc->pause_cb(desc_pool_id,
						      act,
						      WLAN_DATA_FLOW_CONTROL);
				}
			}
		} else {
			pool->pkt_drop_no_desc++;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
			pool->status = FLOW_POOL_VO_PAUSED;

			/* Update maximum pause duration for HI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_HI];
			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
				pool->max_pause_time[DP_TH_HI] = pause_dur;
		}
		break;
	case FLOW_POOL_VO_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
			act = WLAN_NETIF_VO_QUEUE_ON;
			pool->status = FLOW_POOL_VI_PAUSED;

			/* Update maximum pause duration for VO queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VO];
			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
				pool->max_pause_time[DP_TH_VO] = pause_dur;
		}
		break;
	case FLOW_POOL_VI_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
			act = WLAN_NETIF_VI_QUEUE_ON;
			pool->status = FLOW_POOL_BE_BK_PAUSED;

			/* Update maximum pause duration for VI queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_VI];
			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
				pool->max_pause_time[DP_TH_VI] = pause_dur;
		}
		break;
	case FLOW_POOL_BE_BK_PAUSED:
		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
			act = WLAN_WAKE_NON_PRIORITY_QUEUE;
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;

			/* Update maximum pause duration for BE_BK queue */
			pause_dur = unpause_time -
					pool->latest_pause_time[DP_TH_BE_BK];
			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s %d pool is freed!!",
				  __func__, __LINE__);
			return;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
		soc->pause_cb(pool->flow_pool_id,
			      act, WLAN_DATA_FLOW_CONTROL);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else /* QCA_AC_BASED_FLOW_CONTROL */

static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
{
	if (qdf_unlikely(avail_desc < pool->stop_th))
		return true;
	else
		return false;
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: TX descriptor allocated or NULL
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	if (pool) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
		    pool->avail_desc) {
			tx_desc = dp_tx_get_desc_flow_pool(pool);
			tx_desc->pool_id = desc_pool_id;
			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				soc->pause_cb(desc_pool_id,
					      WLAN_STOP_ALL_NETIF_QUEUE,
					      WLAN_DATA_FLOW_CONTROL);
			} else {
				qdf_spin_unlock_bh(&pool->flow_pool_lock);
			}

			/*
			 * If one packet is going to be sent, PM usage count
			 * needs to be incremented by one to prevent future
			 * runtime suspend. This should be tied with the
			 * success of allocating one descriptor. It will be
			 * decremented after the packet has been sent.
			 */
			hif_pm_runtime_get_noresume(soc->hif_handle);
		} else {
			pool->pkt_drop_no_desc++;
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
		}
	} else {
		soc->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: ID of the flow control pool
 *
 * Return: None
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	dp_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			soc->pause_cb(pool->flow_pool_id,
				      WLAN_WAKE_ALL_NETIF_QUEUE,
				      WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->pool_size) {
			dp_tx_desc_pool_free(soc, desc_pool_id);
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			qdf_print("%s %d pool is freed!!",
				  __func__, __LINE__);
			goto out;
		}
		break;

	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		qdf_print("%s %d pool is INACTIVE State!!",
			  __func__, __LINE__);
		break;
	}

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

out:
	/*
	 * Decrement PM usage count if the packet has been sent. This
	 * should be tied with the success of freeing one descriptor.
	 */
	hif_pm_runtime_put(soc->hif_handle);
}

#endif /* QCA_AC_BASED_FLOW_CONTROL */

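/**
 * dp_tx_desc_thresh_reached() - Check if a vdev's TX descriptor pool has
 * dropped to its stop threshold
 * @soc_hdl: Handle to CDP SoC structure
 * @vdev_id: ID of the virtual device
 *
 * Return: true if the pool threshold is reached, false otherwise
 */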
static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
								  vdev_id);
	struct dp_tx_desc_pool_s *pool;

	if (!vdev)
		return false;

	pool = vdev->pool;

	return dp_tx_is_threshold_reached(pool, pool->avail_desc);
}
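
/*
 * Usage sketch (illustrative only): a typical flow-controlled send path
 * pairs dp_tx_desc_alloc() with dp_tx_desc_free() once the completion for
 * the frame arrives; the pause callback registered through
 * dp_txrx_register_pause_cb() is invoked internally as thresholds are
 * crossed, so the caller only handles the NULL case:
 *
 *	struct dp_tx_desc_s *desc = dp_tx_desc_alloc(soc, pool_id);
 *
 *	if (!desc)
 *		return QDF_STATUS_E_RESOURCES;	// queues already paused
 *	// ... program and enqueue the frame ...
 *	// later, in the TX completion handler:
 *	dp_tx_desc_free(soc, desc, pool_id);
 */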
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */

static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}

static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}

static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint16_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}

/**
 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 *
 * Return: Tx descriptor, or NULL if the pool is exhausted
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
						    uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc = NULL;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc = soc->tx_desc[desc_pool_id].freelist;

	/* Pool is exhausted */
	if (!tx_desc) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		return NULL;
	}

	soc->tx_desc[desc_pool_id].freelist =
		soc->tx_desc[desc_pool_id].freelist->next;
	soc->tx_desc[desc_pool_id].num_allocated++;
	soc->tx_desc[desc_pool_id].num_free--;

	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

	return tx_desc;
}

/**
 * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
 * from given pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id to allocate from
 * @num_requested: number of required descriptors
 *
 * Allocate multiple tx descriptors and link them into a list.
 *
 * Return: h_desc, pointer to the first descriptor of the list
 */
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
	uint8_t count;

	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	if ((num_requested == 0) ||
	    (soc->tx_desc[desc_pool_id].num_free < num_requested)) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s, No Free Desc: Available(%d) num_requested(%d)",
			  __func__, soc->tx_desc[desc_pool_id].num_free,
			  num_requested);
		return NULL;
	}

	h_desc = soc->tx_desc[desc_pool_id].freelist;

	/* h_desc should never be NULL since num_free > requested */
	qdf_assert_always(h_desc);

	c_desc = h_desc;
	for (count = 0; count < (num_requested - 1); count++) {
		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
		c_desc = c_desc->next;
	}
	/* mark the tail node too, and account for all num_requested nodes */
	c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
	soc->tx_desc[desc_pool_id].num_free -= num_requested;
	soc->tx_desc[desc_pool_id].num_allocated += num_requested;
	soc->tx_desc[desc_pool_id].freelist = c_desc->next;
	c_desc->next = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
	return h_desc;
}
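
/*
 * Usage sketch (illustrative only): consuming a batch allocation. The
 * descriptors come back as a NULL-terminated singly linked list headed by
 * the returned pointer:
 *
 *	struct dp_tx_desc_s *head = dp_tx_desc_alloc_multiple(soc, 0, 4);
 *	struct dp_tx_desc_s *cur;
 *
 *	if (head) {
 *		for (cur = head; cur; cur = cur->next) {
 *			// ... program each descriptor ...
 *		}
 *	}
 */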

/**
 * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
 * @soc: Handle to DP SoC structure
 * @tx_desc: the tx descriptor to be freed
 * @desc_pool_id: pool id of the tx descriptor
 */
static inline void
dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev = NULL;
	tx_desc->nbuf = NULL;
	tx_desc->flags = 0;
	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
	soc->tx_desc[desc_pool_id].freelist = tx_desc;
	soc->tx_desc[desc_pool_id].num_allocated--;
	soc->tx_desc[desc_pool_id].num_free++;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
Leo Changc2a7b762016-09-26 13:15:41 -0700635
Jinwei Chenfc765552018-05-18 13:27:32 +0800636#ifdef QCA_DP_TX_DESC_ID_CHECK
637/**
638 * dp_tx_is_desc_id_valid() - check is the tx desc id valid
639 *
640 * @soc Handle to DP SoC structure
641 * @tx_desc_id
642 *
643 * Return: true or false
644 */
645static inline bool
646dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
647{
648 uint8_t pool_id;
649 uint16_t page_id, offset;
650 struct dp_tx_desc_pool_s *pool;
651
652 pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
653 DP_TX_DESC_ID_POOL_OS;
654 /* Pool ID is out of limit */
655 if (pool_id > wlan_cfg_get_num_tx_desc_pool(
656 soc->wlan_cfg_ctx)) {
657 QDF_TRACE(QDF_MODULE_ID_DP,
658 QDF_TRACE_LEVEL_FATAL,
659 "%s:Tx Comp pool id %d not valid",
660 __func__,
661 pool_id);
662 goto warn_exit;
663 }
664
665 pool = &soc->tx_desc[pool_id];
666 /* the pool is freed */
667 if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
668 QDF_TRACE(QDF_MODULE_ID_DP,
669 QDF_TRACE_LEVEL_FATAL,
670 "%s:the pool %d has been freed",
671 __func__,
672 pool_id);
673 goto warn_exit;
674 }
675
676 page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
677 DP_TX_DESC_ID_PAGE_OS;
678 /* the page id is out of limit */
679 if (page_id >= pool->desc_pages.num_pages) {
680 QDF_TRACE(QDF_MODULE_ID_DP,
681 QDF_TRACE_LEVEL_FATAL,
682 "%s:the page id %d invalid, pool id %d, num_page %d",
683 __func__,
684 page_id,
685 pool_id,
686 pool->desc_pages.num_pages);
687 goto warn_exit;
688 }
689
690 offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
691 DP_TX_DESC_ID_OFFSET_OS;
692 /* the offset is out of limit */
693 if (offset >= pool->desc_pages.num_element_per_page) {
694 QDF_TRACE(QDF_MODULE_ID_DP,
695 QDF_TRACE_LEVEL_FATAL,
696 "%s:offset %d invalid, pool%d,num_elem_per_page %d",
697 __func__,
698 offset,
699 pool_id,
700 pool->desc_pages.num_element_per_page);
701 goto warn_exit;
702 }
703
704 return true;
705
706warn_exit:
707 QDF_TRACE(QDF_MODULE_ID_DP,
708 QDF_TRACE_LEVEL_FATAL,
709 "%s:Tx desc id 0x%x not valid",
710 __func__,
711 tx_desc_id);
712 qdf_assert_always(0);
713 return false;
714}
715
716#else
717static inline bool
718dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
719{
720 return true;
721}
722#endif /* QCA_DP_TX_DESC_ID_CHECK */
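
/*
 * Usage sketch (illustrative only): a completion handler would validate the
 * cookie before unpacking it. With QCA_DP_TX_DESC_ID_CHECK enabled a bad id
 * triggers the assert above; the early return guards builds where the check
 * compiles to a stub that always returns true:
 *
 *	if (!dp_tx_is_desc_id_valid(soc, tx_desc_id))
 *		return;
 */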

/**
 * dp_tx_desc_find() - find dp tx descriptor from cookie
 * @soc: handle for the device sending the data
 * @pool_id: pool id extracted from the descriptor id
 * @page_id: page id extracted from the descriptor id
 * @offset: offset within the page, extracted from the descriptor id
 *
 * Use the unpacked fields of a tx descriptor ID to find the corresponding
 * descriptor object.
 *
 * Return: the descriptor object that has the specified ID
 */
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
		uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
		tx_desc_pool->elem_size * offset;
}
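
/*
 * Example (illustrative only): resolving a completion cookie back to its
 * descriptor by combining the ID masks with dp_tx_desc_find():
 *
 *	struct dp_tx_desc_s *desc =
 *		dp_tx_desc_find(soc,
 *				(id & DP_TX_DESC_ID_POOL_MASK) >>
 *					DP_TX_DESC_ID_POOL_OS,
 *				(id & DP_TX_DESC_ID_PAGE_MASK) >>
 *					DP_TX_DESC_ID_PAGE_OS,
 *				(id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *					DP_TX_DESC_ID_OFFSET_OS);
 */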

/**
 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 *
 * Return: ext descriptor pointer, or NULL if the pool is exhausted
 */
static inline
struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
	uint8_t desc_pool_id)
{
	struct dp_tx_ext_desc_elem_s *c_elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
		return NULL;
	}
	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist =
		soc->tx_ext_desc[desc_pool_id].freelist->next;
	soc->tx_ext_desc[desc_pool_id].num_free--;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return c_elem;
}

/**
 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
 * @soc: handle for the device sending the data
 * @desc_pool_id: target pool id
 * @elem: ext descriptor to be released
 *
 * Return: None
 */
static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
{
	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = elem;
	soc->tx_ext_desc[desc_pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	return;
}

/**
 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
 * attach them to the free list
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: pool id the descriptors are returned to
 * @elem: head of the tx extension descriptor list to be freed
 * @num_free: number of descriptors to be freed
 *
 * Return: none
 */
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
	uint8_t num_free)
{
	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
	uint8_t freed = num_free;

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(elem);

	head = elem;
	c_elem = head;
	tail = head;
	while (c_elem && freed) {
		tail = c_elem;
		c_elem = c_elem->next;
		freed--;
	}

	/* caller should always guarantee a list of at least num_free nodes */
	qdf_assert_always(tail);

	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
	soc->tx_ext_desc[desc_pool_id].freelist = head;
	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);

	return;
}
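
/*
 * Usage sketch (illustrative only): extension descriptors follow the same
 * alloc/free discipline as the main pool, with the locking handled inside
 * the helpers:
 *
 *	struct dp_tx_ext_desc_elem_s *ext = dp_tx_ext_desc_alloc(soc, 0);
 *
 *	if (ext) {
 *		// ... attach extension info to the frame ...
 *		dp_tx_ext_desc_free(soc, ext, 0);
 *	}
 */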

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to pick up the tso descriptor from
 *
 * Allocates a TSO segment element from the free list held in
 * the soc
 *
 * Return: tso_seg, tso segment memory pointer
 */
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
		struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *tso_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		soc->tx_tso_desc[pool_id].num_free--;
		tso_seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist =
			soc->tx_tso_desc[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return tso_seg;
}

/**
 * dp_tx_tso_desc_free() - function to free a TSO segment
 * @soc: device soc instance
 * @pool_id: pool id to return the tso descriptor to
 * @tso_seg: tso segment memory pointer
 *
 * Returns a TSO segment element to the free list held in
 * the soc
 *
 * Return: none
 */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = tso_seg;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}

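/**
 * dp_tso_num_seg_alloc() - allocate a TSO num-seg element from the pool
 * @soc: device soc instance
 * @pool_id: pool id to pick up the element from
 *
 * Return: pointer to the element tracking the TSO segment count of a
 * packet, or NULL if the free list is empty
 */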
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
	uint8_t pool_id)
{
	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	if (soc->tx_tso_num_seg[pool_id].freelist) {
		soc->tx_tso_num_seg[pool_id].num_free--;
		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
		soc->tx_tso_num_seg[pool_id].freelist =
			soc->tx_tso_num_seg[pool_id].freelist->next;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);

	return tso_num_seg;
}

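/**
 * dp_tso_num_seg_free() - return a TSO num-seg element to the pool free list
 * @soc: device soc instance
 * @pool_id: pool id owning the element
 * @tso_num_seg: element to be returned
 *
 * Return: none
 */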
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
	uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
	soc->tx_tso_num_seg[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
}
#endif /* FEATURE_TSO */

/*
 * dp_tx_me_alloc_buf() - Allocate a descriptor from the ME pool
 * @pdev: DP_PDEV handle for datapath
 *
 * Return: dp_tx_me_buf_t (buf), or NULL if the pool is empty
 */
static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf = NULL;
	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->me_buf.freelist) {
		buf = pdev->me_buf.freelist;
		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
		pdev->me_buf.buf_in_use++;
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Error allocating memory in pool");
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return NULL;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

/*
 * dp_tx_me_free_buf() - Free an ME descriptor and add it back to the pool
 * @pdev: DP_PDEV handle for datapath
 * @buf: allocated ME buf to be freed
 *
 * Return: void
 */
static inline void
dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
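
/*
 * Usage sketch (illustrative only): the per-pdev ME buffer pool is used by
 * the multicast enhancement path; allocation and free are symmetric and
 * internally locked by tx_mutex:
 *
 *	struct dp_tx_me_buf_t *me_buf = dp_tx_me_alloc_buf(pdev);
 *
 *	if (me_buf) {
 *		// ... use the buffer ...
 *		dp_tx_me_free_buf(pdev, me_buf);
 *	}
 */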
#endif /* DP_TX_DESC_H */