/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
#include <ol_tx_desc.h>         /* ol_tx_desc */
#include <ol_tx_send.h>         /* ol_tx_send */
#include <ol_txrx.h>            /* ol_txrx_get_vdev_from_vdev_id */

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>        /* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
#include <ol_tx.h>
#include <ol_cfg.h>
#include <cdp_txrx_handle.h>

#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3

#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
#define TX_FLOW_MGMT_POOL_ID 0xEF
#define TX_FLOW_MGMT_POOL_SIZE 32

/**
 * ol_tx_register_global_mgmt_pool() - register global pool for mgmt packets
 * @pdev: pdev handle
 *
 * Return: none
 */
static void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
        pdev->mgmt_pool = ol_tx_create_flow_pool(TX_FLOW_MGMT_POOL_ID,
                                                 TX_FLOW_MGMT_POOL_SIZE);
        if (!pdev->mgmt_pool)
                ol_txrx_err("Management pool creation failed\n");
}

/**
 * ol_tx_deregister_global_mgmt_pool() - Deregister global pool for mgmt packets
 * @pdev: pdev handle
 *
 * Return: none
 */
static void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
        ol_tx_dec_pool_ref(pdev->mgmt_pool, false);
}
#else
static inline void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
}
static inline void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
}
#endif

bool
ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev)
{
        struct ol_tx_flow_pool_t *pool;
        bool enough_desc_flag;

        if (!vdev)
                return false;

        pool = vdev->pool;

        if (!pool)
                return false;

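        /*
         * Admit a forwarded frame only if the pool keeps at least
         * OL_TX_NON_FWD_RESERVE descriptors above its stop threshold,
         * so forwarding cannot starve locally originated traffic.
         */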
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        enough_desc_flag = (pool->avail_desc < (pool->stop_th +
                                OL_TX_NON_FWD_RESERVE))
                ? false : true;
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
        return enough_desc_flag;
}

/**
 * ol_txrx_register_pause_cb() - register pause callback
 * @soc: soc handle
 * @pause_cb: pause callback
 *
 * Return: QDF status
 */
QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
                                     tx_pause_callback pause_cb)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev || !pause_cb) {
                ol_txrx_err("pdev or pause_cb is NULL");
                return QDF_STATUS_E_INVAL;
        }
        pdev->pause_cb = pause_cb;
        return QDF_STATUS_SUCCESS;
}

/**
 * ol_tx_set_desc_global_pool_size() - set global pool size
 * @num_msdu_desc: total number of descriptors
 *
 * Return: none
 */
void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                qdf_print("%s: pdev is NULL\n", __func__);
                return;
        }
        pdev->num_msdu_desc = num_msdu_desc;
        if (!ol_tx_get_is_mgmt_over_wmi_enabled())
                pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
        ol_txrx_info_high("Global pool size: %d\n", pdev->num_msdu_desc);
}

/**
 * ol_tx_get_total_free_desc() - get total free descriptors
 * @pdev: pdev handle
 *
 * Return: total free descriptors
 */
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
{
        struct ol_tx_flow_pool_t *pool = NULL;
        uint32_t free_desc;

        free_desc = pdev->tx_desc.num_free;
        qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
                      flow_pool_list_elem) {
                qdf_spin_lock_bh(&pool->flow_pool_lock);
                free_desc += pool->avail_desc;
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
        }
        qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

        return free_desc;
}

/**
 * ol_tx_register_flow_control() - Register fw based tx flow control
 * @pdev: pdev handle
 *
 * Return: none
 */
void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
{
        qdf_spinlock_create(&pdev->tx_desc.flow_pool_list_lock);
        TAILQ_INIT(&pdev->tx_desc.flow_pool_list);

        if (!ol_tx_get_is_mgmt_over_wmi_enabled())
                ol_tx_register_global_mgmt_pool(pdev);
}

/**
 * ol_tx_deregister_flow_control() - Deregister fw based tx flow control
 * @pdev: pdev handle
 *
 * Return: none
 */
void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
{
        int i = 0;
        struct ol_tx_flow_pool_t *pool = NULL;

        if (!ol_tx_get_is_mgmt_over_wmi_enabled())
                ol_tx_deregister_global_mgmt_pool(pdev);

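        /*
         * Any pools still on the list at deregistration time are
         * force-released here; the flow pool state is dumped once, on the
         * first leftover pool, to aid debugging.
         */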
        qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        while (!TAILQ_EMPTY(&pdev->tx_desc.flow_pool_list)) {
                pool = TAILQ_FIRST(&pdev->tx_desc.flow_pool_list);
                if (!pool)
                        break;
                qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
                ol_txrx_info("flow pool list is not empty %d!!!\n", i++);

                if (i == 1)
                        ol_tx_dump_flow_pool_info((void *)pdev);

                ol_tx_dec_pool_ref(pool, true);
                qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        }
        qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
        qdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
}

/**
 * ol_tx_delete_flow_pool() - delete flow pool
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * global pool.
 *
 * Return: 0 for success or error
 */
static int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool, bool force)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        uint16_t i, size;
        union ol_tx_desc_list_elem_t *temp_list = NULL;
        struct ol_tx_desc_t *tx_desc = NULL;

        if (!pool) {
                ol_txrx_err("%s: pool is NULL\n", __func__);
                QDF_ASSERT(0);
                return -ENOMEM;
        }
        if (!pdev) {
                ol_txrx_err("%s: pdev is NULL\n", __func__);
                QDF_ASSERT(0);
                return -ENOMEM;
        }

        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (pool->avail_desc == pool->flow_pool_size || force == true)
                pool->status = FLOW_POOL_INACTIVE;
        else
                pool->status = FLOW_POOL_INVALID;

        /* Take all free descriptors and put them in temp_list */
        temp_list = pool->freelist;
        size = pool->avail_desc;
        pool->freelist = NULL;
        pool->avail_desc = 0;

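        /*
         * An INACTIVE pool has all of its descriptors back and can be freed
         * immediately.  An INVALID pool still has descriptors in flight, so
         * it keeps an extra reference and stays on the flow pool list until
         * those descriptors are returned.
         */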
        if (pool->status == FLOW_POOL_INACTIVE) {
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                /* Free flow_pool */
                qdf_spinlock_destroy(&pool->flow_pool_lock);
                qdf_mem_free(pool);
        } else { /* FLOW_POOL_INVALID case */
                pool->flow_pool_size -= size;
                pool->flow_pool_id = INVALID_FLOW_ID;
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                ol_tx_inc_pool_ref(pool);

                pdev->tx_desc.num_invalid_bin++;
                ol_txrx_info("%s: invalid pool created %d\n",
                             __func__, pdev->tx_desc.num_invalid_bin);
                if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
                        ASSERT(0);

                qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
                TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
                                  flow_pool_list_elem);
                qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
        }

        /* put free descriptors to global pool */
        qdf_spin_lock_bh(&pdev->tx_mutex);
        for (i = 0; i < size; i++) {
                tx_desc = &temp_list->tx_desc;
                temp_list = temp_list->next;

                ol_tx_put_desc_global_pool(pdev, tx_desc);
        }
        qdf_spin_unlock_bh(&pdev->tx_mutex);

        ol_tx_distribute_descs_to_deficient_pools_from_global_pool();

        return 0;
}

QDF_STATUS ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool)
{
        if (!pool) {
                ol_txrx_err("flow pool is NULL");
                return QDF_STATUS_E_INVAL;
        }

        qdf_spin_lock_bh(&pool->flow_pool_lock);
        qdf_atomic_inc(&pool->ref_cnt);
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
        ol_txrx_dbg("pool %pK, ref_cnt %x",
                    pool, qdf_atomic_read(&pool->ref_cnt));

        return QDF_STATUS_SUCCESS;
}

QDF_STATUS ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pool) {
                ol_txrx_err("flow pool is NULL");
                QDF_ASSERT(0);
                return QDF_STATUS_E_INVAL;
        }

        if (!pdev) {
                ol_txrx_err("pdev is NULL");
                QDF_ASSERT(0);
                return QDF_STATUS_E_INVAL;
        }

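        /*
         * Nested locks, taken in the same order used elsewhere in this file:
         * flow_pool_list_lock first, then flow_pool_lock, so the pool can be
         * unlinked atomically when the last reference is dropped.
         */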
        qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (qdf_atomic_dec_and_test(&pool->ref_cnt)) {
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool,
                             flow_pool_list_elem);
                qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
                ol_txrx_dbg("Deleting pool %pK", pool);
                ol_tx_delete_flow_pool(pool, force);
        } else {
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
                ol_txrx_dbg("pool %pK, ref_cnt %x",
                            pool, qdf_atomic_read(&pool->ref_cnt));
        }

        return QDF_STATUS_SUCCESS;
}

/**
 * ol_tx_flow_pool_status_to_str() - convert flow pool status to string
 * @status: flow pool status
 *
 * Returns: String corresponding to flow pool status
 */
static const char *ol_tx_flow_pool_status_to_str
                                        (enum flow_pool_status status)
{
        switch (status) {
        CASE_RETURN_STRING(FLOW_POOL_ACTIVE_UNPAUSED);
        CASE_RETURN_STRING(FLOW_POOL_ACTIVE_PAUSED);
        CASE_RETURN_STRING(FLOW_POOL_NON_PRIO_PAUSED);
        CASE_RETURN_STRING(FLOW_POOL_INVALID);
        CASE_RETURN_STRING(FLOW_POOL_INACTIVE);
        default:
                return "unknown";
        }
}

/**
 * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 * @ctx: cdp_soc context, required only in lithium_dp flow control.
 *       Remove void * while cleaning up cds_get_context.
 *
 * Return: none
 */
void ol_tx_dump_flow_pool_info(void *ctx)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        struct ol_tx_flow_pool_t *pool = NULL, *pool_prev = NULL;
        struct ol_tx_flow_pool_t tmp_pool;

        if (!pdev) {
                ol_txrx_err("ERROR: pdev NULL");
                QDF_ASSERT(0); /* traceback */
                return;
        }

        ol_txrx_log(QDF_TRACE_LEVEL_INFO_LOW,
                    "Global total %d :: avail %d invalid flow_pool %d ",
                    pdev->tx_desc.pool_size,
                    pdev->tx_desc.num_free,
                    pdev->tx_desc.num_invalid_bin);

        ol_txrx_log(QDF_TRACE_LEVEL_INFO_LOW,
                    "maps %d pool unmaps %d pool resize %d pkt drops %d",
                    pdev->pool_stats.pool_map_count,
                    pdev->pool_stats.pool_unmap_count,
                    pdev->pool_stats.pool_resize_count,
                    pdev->pool_stats.pkt_drop_no_pool);
        /*
         * Nested spin lock.
         * Always take in below order.
         * flow_pool_list_lock -> flow_pool_lock
         */
        qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
                      flow_pool_list_elem) {
                ol_tx_inc_pool_ref(pool);
                qdf_spin_lock_bh(&pool->flow_pool_lock);
                qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

                if (pool_prev)
                        ol_tx_dec_pool_ref(pool_prev, false);

                ol_txrx_log(QDF_TRACE_LEVEL_INFO_LOW,
                            "flow_pool_id %d ::", tmp_pool.flow_pool_id);
                ol_txrx_log(QDF_TRACE_LEVEL_INFO_LOW,
                            "status %s flow_id %d flow_type %d",
                            ol_tx_flow_pool_status_to_str(tmp_pool.status),
                            tmp_pool.member_flow_id, tmp_pool.flow_type);
                ol_txrx_log(QDF_TRACE_LEVEL_INFO_LOW,
                            "total %d :: available %d :: deficient %d :: overflow %d :: pkt dropped (no desc) %d",
                            tmp_pool.flow_pool_size, tmp_pool.avail_desc,
                            tmp_pool.deficient_desc,
                            tmp_pool.overflow_desc,
                            tmp_pool.pkt_drop_no_desc);
                ol_txrx_log(QDF_TRACE_LEVEL_INFO_LOW,
                            "thresh: start %d stop %d prio start %d prio stop %d",
                            tmp_pool.start_th, tmp_pool.stop_th,
                            tmp_pool.start_priority_th,
                            tmp_pool.stop_priority_th);
                pool_prev = pool;
                qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        }
        qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

        /* decrement ref count for last pool in list */
        if (pool_prev)
                ol_tx_dec_pool_ref(pool_prev, false);
}

/**
 * ol_tx_clear_flow_pool_stats() - clear flow pool statistics
 *
 * Return: none
 */
void ol_tx_clear_flow_pool_stats(void)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if (!pdev) {
                ol_txrx_err("%s: pdev is null\n", __func__);
                return;
        }
        qdf_mem_zero(&pdev->pool_stats, sizeof(pdev->pool_stats));
}

/**
 * ol_tx_move_desc_n() - Move n descriptors from src_pool to dst_pool.
 * @src_pool: source pool
 * @dst_pool: destination pool
 * @desc_move_count: descriptor move count
 *
 * Return: actual descriptors moved
 */
static int ol_tx_move_desc_n(struct ol_tx_flow_pool_t *src_pool,
                             struct ol_tx_flow_pool_t *dst_pool,
                             int desc_move_count)
{
        uint16_t count = 0, i;
        struct ol_tx_desc_t *tx_desc;
        union ol_tx_desc_list_elem_t *temp_list = NULL;

        /* Take descriptors from the source pool and put them in temp_list */
        qdf_spin_lock_bh(&src_pool->flow_pool_lock);
        for (i = 0; i < desc_move_count; i++) {
                tx_desc = ol_tx_get_desc_flow_pool(src_pool);
                ((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
                temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
        }
        qdf_spin_unlock_bh(&src_pool->flow_pool_lock);

        /* Take descriptors from temp_list and put them in the destination pool */
        qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
        for (i = 0; i < desc_move_count; i++) {
                if (dst_pool->deficient_desc)
                        dst_pool->deficient_desc--;
                else
                        break;
                tx_desc = &temp_list->tx_desc;
                temp_list = temp_list->next;
                ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
                count++;
        }
        qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);

        /* If anything is left in temp_list, put it back in the source pool */
        qdf_spin_lock_bh(&src_pool->flow_pool_lock);
        while (temp_list) {
                tx_desc = &temp_list->tx_desc;
                temp_list = temp_list->next;
                ol_tx_put_desc_flow_pool(src_pool, tx_desc);
        }
        qdf_spin_unlock_bh(&src_pool->flow_pool_lock);

        return count;
}

/**
 * ol_tx_distribute_descs_to_deficient_pools() - Distribute descriptors
 * @src_pool: source pool
 *
 * Distribute all descriptors of source pool to all
 * deficient pools as per flow_pool_list.
 *
 * Return: 0 for success
 */
static int
ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        struct ol_tx_flow_pool_t *dst_pool = NULL;
        uint16_t desc_count = src_pool->avail_desc;
        uint16_t desc_move_count = 0;

        if (!pdev) {
                ol_txrx_err("%s: pdev is NULL\n", __func__);
                return -EINVAL;
        }
        qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
                      flow_pool_list_elem) {
                qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
                if (dst_pool->deficient_desc) {
                        desc_move_count =
                                (dst_pool->deficient_desc > desc_count) ?
                                desc_count : dst_pool->deficient_desc;
                        qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
                        desc_move_count = ol_tx_move_desc_n(src_pool,
                                                dst_pool, desc_move_count);
                        desc_count -= desc_move_count;

                        qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
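                        /*
                         * If the refill lifted the pool back above its start
                         * threshold, wake the netif queues that were paused
                         * for lack of descriptors.
                         */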
                        if (dst_pool->status == FLOW_POOL_ACTIVE_PAUSED) {
                                if (dst_pool->avail_desc > dst_pool->start_th) {
                                        pdev->pause_cb(dst_pool->member_flow_id,
                                                WLAN_WAKE_ALL_NETIF_QUEUE,
                                                WLAN_DATA_FLOW_CONTROL);
                                        dst_pool->status =
                                                FLOW_POOL_ACTIVE_UNPAUSED;
                                }
                        }
                }
                qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
                if (desc_count == 0)
                        break;
        }
        qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

        return 0;
}

/**
 * ol_tx_create_flow_pool() - create flow pool
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
                                                 uint16_t flow_pool_size)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        struct ol_tx_flow_pool_t *pool;
        uint16_t size = 0, i;
        struct ol_tx_desc_t *tx_desc;
        union ol_tx_desc_list_elem_t *temp_list = NULL;
        uint32_t stop_threshold;
        uint32_t start_threshold;

        if (!pdev) {
                ol_txrx_err("%s: pdev is NULL\n", __func__);
                return NULL;
        }
        stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
        start_threshold = stop_threshold +
                ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
        pool = qdf_mem_malloc(sizeof(*pool));
        if (!pool) {
                ol_txrx_err("%s: malloc failed\n", __func__);
                return NULL;
        }
        pool->flow_pool_id = flow_pool_id;
        pool->flow_pool_size = flow_pool_size;
        pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
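        /*
         * start_th/stop_th are the configured start/stop percentages applied
         * to this pool's size; the priority thresholds are TX_PRIORITY_TH
         * percent of those, reduced by MAX_TSO_SEGMENT_DESC when large
         * enough so a burst of TSO segments does not immediately cross them.
         */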
        pool->start_th = (start_threshold * flow_pool_size) / 100;
        pool->stop_th = (stop_threshold * flow_pool_size) / 100;
        pool->stop_priority_th = (TX_PRIORITY_TH * pool->stop_th) / 100;
        if (pool->stop_priority_th >= MAX_TSO_SEGMENT_DESC)
                pool->stop_priority_th -= MAX_TSO_SEGMENT_DESC;

        pool->start_priority_th = (TX_PRIORITY_TH * pool->start_th) / 100;
        if (pool->start_priority_th >= MAX_TSO_SEGMENT_DESC)
                pool->start_priority_th -= MAX_TSO_SEGMENT_DESC;

        qdf_spinlock_create(&pool->flow_pool_lock);
        qdf_atomic_init(&pool->ref_cnt);
        ol_tx_inc_pool_ref(pool);

        /* Take TX descriptors from global_pool and put them in temp_list */
        qdf_spin_lock_bh(&pdev->tx_mutex);
        if (pdev->tx_desc.num_free >= pool->flow_pool_size)
                size = pool->flow_pool_size;
        else
                size = pdev->tx_desc.num_free;

        for (i = 0; i < size; i++) {
                tx_desc = ol_tx_get_desc_global_pool(pdev);
                tx_desc->pool = pool;
                ((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
                temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
        }
        qdf_spin_unlock_bh(&pdev->tx_mutex);

        /* put temp_list to flow_pool */
        pool->freelist = temp_list;
        pool->avail_desc = size;
        pool->deficient_desc = pool->flow_pool_size - pool->avail_desc;
        /* used for resize pool */
        pool->overflow_desc = 0;

        /* Add flow_pool to flow_pool_list */
        qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
                          flow_pool_list_elem);
        qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

        return pool;
}

/**
 * ol_tx_free_invalid_flow_pool() - free invalid pool
 * @pool: pool
 *
 * Return: 0 on success, error code on failure
 */
int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

        if ((!pdev) || (!pool) || (pool->status != FLOW_POOL_INVALID)) {
                ol_txrx_err("%s: Invalid pool/pdev\n", __func__);
                return -EINVAL;
        }

        /* directly distribute to other deficient pools */
        ol_tx_distribute_descs_to_deficient_pools(pool);

        qdf_spin_lock_bh(&pool->flow_pool_lock);
        pool->flow_pool_size = pool->avail_desc;
        qdf_spin_unlock_bh(&pool->flow_pool_lock);

        pdev->tx_desc.num_invalid_bin--;
        ol_txrx_info("%s: invalid pool deleted %d\n",
                     __func__, pdev->tx_desc.num_invalid_bin);

        return ol_tx_dec_pool_ref(pool, false);
}

/**
 * ol_tx_get_flow_pool() - get flow_pool from flow_pool_id
 * @flow_pool_id: flow pool id
 *
 * Return: flow_pool ptr / NULL if not found
 */
static struct ol_tx_flow_pool_t *ol_tx_get_flow_pool(uint8_t flow_pool_id)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        struct ol_tx_flow_pool_t *pool = NULL;
        bool is_found = false;

        if (!pdev) {
                ol_txrx_err("ERROR: pdev NULL");
                QDF_ASSERT(0); /* traceback */
                return NULL;
        }

        qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
                      flow_pool_list_elem) {
                qdf_spin_lock_bh(&pool->flow_pool_lock);
                if (pool->flow_pool_id == flow_pool_id) {
                        qdf_spin_unlock_bh(&pool->flow_pool_lock);
                        is_found = true;
                        break;
                }
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
        }
        qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

        if (is_found == false)
                pool = NULL;

        return pool;
}

/**
 * ol_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
static void ol_tx_flow_pool_vdev_map(struct ol_tx_flow_pool_t *pool,
                                     uint8_t vdev_id)
{
        struct ol_txrx_vdev_t *vdev;

        vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
        if (!vdev) {
                ol_txrx_err("%s: invalid vdev_id %d\n", __func__, vdev_id);
                return;
        }

        vdev->pool = pool;
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        pool->member_flow_id = vdev_id;
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

/**
 * ol_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
static void ol_tx_flow_pool_vdev_unmap(struct ol_tx_flow_pool_t *pool,
                                       uint8_t vdev_id)
{
        struct ol_txrx_vdev_t *vdev;

        vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
        if (!vdev) {
                ol_txrx_err("%s: invalid vdev_id %d\n", __func__, vdev_id);
                return;
        }

        vdev->pool = NULL;
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        pool->member_flow_id = INVALID_FLOW_ID;
        qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

/**
 * ol_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: none
 */
void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
                                 uint8_t flow_pool_id, uint16_t flow_pool_size)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        struct ol_tx_flow_pool_t *pool;
        uint8_t pool_create = 0;
        enum htt_flow_type type = flow_type;

        ol_txrx_dbg(
                "%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
                __func__, flow_id, flow_type, flow_pool_id, flow_pool_size);

        if (qdf_unlikely(!pdev)) {
                ol_txrx_err("%s: pdev is NULL", __func__);
                return;
        }
        pdev->pool_stats.pool_map_count++;

        pool = ol_tx_get_flow_pool(flow_pool_id);
        if (!pool) {
                pool = ol_tx_create_flow_pool(flow_pool_id, flow_pool_size);
                if (pool == NULL) {
                        ol_txrx_err("%s: creation of flow_pool %d size %d failed\n",
                                    __func__, flow_pool_id, flow_pool_size);
                        return;
                }
                pool_create = 1;
        }

        switch (type) {

        case FLOW_TYPE_VDEV:
                ol_tx_flow_pool_vdev_map(pool, flow_id);
                pdev->pause_cb(flow_id,
                               WLAN_WAKE_ALL_NETIF_QUEUE,
                               WLAN_DATA_FLOW_CONTROL);
                break;
        default:
                if (pool_create)
                        ol_tx_dec_pool_ref(pool, false);
                ol_txrx_err("%s: flow type %d not supported !!!\n",
                            __func__, type);
                break;
        }
}

/**
 * ol_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
 *
 * Return: none
 */
void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
                                   uint8_t flow_pool_id)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        struct ol_tx_flow_pool_t *pool;
        enum htt_flow_type type = flow_type;

        ol_txrx_dbg("%s: flow_id %d flow_type %d flow_pool_id %d\n",
                    __func__, flow_id, flow_type, flow_pool_id);

        if (qdf_unlikely(!pdev)) {
                ol_txrx_err("%s: pdev is NULL", __func__);
                return;
        }
        pdev->pool_stats.pool_unmap_count++;

        pool = ol_tx_get_flow_pool(flow_pool_id);
        if (!pool) {
                ol_txrx_info("%s: flow_pool not available flow_pool_id %d\n",
                             __func__, flow_pool_id);
                return;
        }

        switch (type) {

        case FLOW_TYPE_VDEV:
                ol_tx_flow_pool_vdev_unmap(pool, flow_id);
                break;
        default:
                ol_txrx_info("%s: flow type %d not supported !!!\n",
                             __func__, type);
                return;
        }

        /*
         * only delete if all descriptors are available
         * and pool ref count becomes 0
         */
        ol_tx_dec_pool_ref(pool, false);
}

#ifdef QCA_LL_TX_FLOW_CONTROL_RESIZE
/**
 * ol_tx_distribute_descs_to_deficient_pools_from_global_pool()
 *
 * Distribute descriptors of global pool to all
 * deficient pools as per need.
 *
 * Return: 0 for success
 */
int ol_tx_distribute_descs_to_deficient_pools_from_global_pool(void)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        struct ol_tx_flow_pool_t *dst_pool = NULL;
        struct ol_tx_flow_pool_t *tmp_pool = NULL;
        uint16_t total_desc_req = 0;
        uint16_t desc_move_count = 0;
        uint16_t temp_count = 0, i;
        union ol_tx_desc_list_elem_t *temp_list = NULL;
        struct ol_tx_desc_t *tx_desc;
        uint8_t free_invalid_pool = 0;

        if (!pdev) {
                ol_txrx_err("%s: pdev is NULL\n", __func__);
                return -EINVAL;
        }

        /* Nested locks: maintain flow_pool_list_lock->flow_pool_lock */
        /* find out total deficient desc required */
        qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
                      flow_pool_list_elem) {
                qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
                total_desc_req += dst_pool->deficient_desc;
                qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
        }
        qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

        qdf_spin_lock_bh(&pdev->tx_mutex);
        desc_move_count = (pdev->tx_desc.num_free >= total_desc_req) ?
                                total_desc_req : pdev->tx_desc.num_free;

        for (i = 0; i < desc_move_count; i++) {
                tx_desc = ol_tx_get_desc_global_pool(pdev);
                ((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
                temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
        }
        qdf_spin_unlock_bh(&pdev->tx_mutex);

        if (!desc_move_count)
                return 0;

        /* distribute desc to deficient pools */
        qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
        TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
                      flow_pool_list_elem) {
                qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
                if (dst_pool->deficient_desc) {
                        temp_count =
                                (dst_pool->deficient_desc > desc_move_count) ?
                                desc_move_count : dst_pool->deficient_desc;

                        desc_move_count -= temp_count;
                        for (i = 0; i < temp_count; i++) {
                                tx_desc = &temp_list->tx_desc;
                                temp_list = temp_list->next;
                                ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
                        }

                        if (dst_pool->status == FLOW_POOL_ACTIVE_PAUSED) {
                                if (dst_pool->avail_desc > dst_pool->start_th) {
                                        pdev->pause_cb(dst_pool->member_flow_id,
                                                WLAN_WAKE_ALL_NETIF_QUEUE,
                                                WLAN_DATA_FLOW_CONTROL);
                                        dst_pool->status =
                                                FLOW_POOL_ACTIVE_UNPAUSED;
                                }
                        } else if ((dst_pool->status == FLOW_POOL_INVALID) &&
                                   (dst_pool->avail_desc ==
                                         dst_pool->flow_pool_size)) {
                                free_invalid_pool = 1;
                                tmp_pool = dst_pool;
                        }
                }
                qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
                if (desc_move_count == 0)
                        break;
        }
        qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

        if (free_invalid_pool && tmp_pool)
                ol_tx_free_invalid_flow_pool(tmp_pool);

        return 0;
}

/**
 * ol_tx_flow_pool_update_queue_state() - update network queue for pool based
 *                                        on new available count.
 * @pdev : pdev handle
 * @pool : pool handle
 *
 * Return : none
 */
static void ol_tx_flow_pool_update_queue_state(struct ol_txrx_pdev_t *pdev,
                                               struct ol_tx_flow_pool_t *pool)
{
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (pool->avail_desc > pool->start_th) {
                pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                pdev->pause_cb(pool->member_flow_id,
                               WLAN_WAKE_ALL_NETIF_QUEUE,
                               WLAN_DATA_FLOW_CONTROL);
        } else if (pool->avail_desc < pool->stop_th &&
                   pool->avail_desc >= pool->stop_priority_th) {
                pool->status = FLOW_POOL_NON_PRIO_PAUSED;
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                pdev->pause_cb(pool->member_flow_id,
                               WLAN_STOP_NON_PRIORITY_QUEUE,
                               WLAN_DATA_FLOW_CONTROL);
                pdev->pause_cb(pool->member_flow_id,
                               WLAN_NETIF_PRIORITY_QUEUE_ON,
                               WLAN_DATA_FLOW_CONTROL);
        } else if (pool->avail_desc < pool->stop_priority_th) {
                pool->status = FLOW_POOL_ACTIVE_PAUSED;
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                pdev->pause_cb(pool->member_flow_id,
                               WLAN_STOP_ALL_NETIF_QUEUE,
                               WLAN_DATA_FLOW_CONTROL);
        } else {
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
        }
}

/**
 * ol_tx_flow_pool_update() - update pool parameters with new size
 * @pool : pool handle
 * @new_pool_size : new pool size
 * @deficient_count : deficient count
 * @overflow_count : overflow count
 *
 * Return : none
 */
static void ol_tx_flow_pool_update(struct ol_tx_flow_pool_t *pool,
                                   uint16_t new_pool_size,
                                   uint16_t deficient_count,
                                   uint16_t overflow_count)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        uint32_t stop_threshold =
                        ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
        uint32_t start_threshold = stop_threshold +
                        ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);

        pool->flow_pool_size = new_pool_size;
        pool->start_th = (start_threshold * new_pool_size) / 100;
        pool->stop_th = (stop_threshold * new_pool_size) / 100;
        pool->stop_priority_th = (TX_PRIORITY_TH * pool->stop_th) / 100;
        if (pool->stop_priority_th >= MAX_TSO_SEGMENT_DESC)
                pool->stop_priority_th -= MAX_TSO_SEGMENT_DESC;

        pool->start_priority_th = (TX_PRIORITY_TH * pool->start_th) / 100;
        if (pool->start_priority_th >= MAX_TSO_SEGMENT_DESC)
                pool->start_priority_th -= MAX_TSO_SEGMENT_DESC;

        if (deficient_count)
                pool->deficient_desc = deficient_count;

        if (overflow_count)
                pool->overflow_desc = overflow_count;
}

/**
 * ol_tx_flow_pool_resize() - resize pool with new size
 * @pool: pool pointer
 * @new_pool_size: new pool size
 *
 * Return: none
 */
static void ol_tx_flow_pool_resize(struct ol_tx_flow_pool_t *pool,
                                   uint16_t new_pool_size)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        uint16_t diff = 0, overflow_count = 0, deficient_count = 0;
        uint16_t move_desc_to_global = 0, move_desc_from_global = 0;
        union ol_tx_desc_list_elem_t *temp_list = NULL;
        int i = 0, update_done = 0;
        struct ol_tx_desc_t *tx_desc = NULL;
        uint16_t temp = 0;

        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (pool->flow_pool_size == new_pool_size) {
                qdf_spin_unlock_bh(&pool->flow_pool_lock);
                ol_txrx_info("pool resize received with same size");
                return;
        }
        qdf_spin_unlock_bh(&pool->flow_pool_lock);

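        /*
         * deficient_desc counts descriptors this pool is still owed from the
         * global pool; overflow_desc counts descriptors it must hand back
         * once in-flight frames complete.  A resize first settles the new
         * size against these counters before moving any free descriptors.
         */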
        /* Reduce pool size */
        /* start_priority_th desc should be available after reduction */
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (pool->flow_pool_size > new_pool_size) {
                diff = pool->flow_pool_size - new_pool_size;
                diff += pool->overflow_desc;
                pool->overflow_desc = 0;
                temp = QDF_MIN(pool->deficient_desc, diff);
                pool->deficient_desc -= temp;
                diff -= temp;

                if (diff) {
                        /* Have enough descriptors */
                        if (pool->avail_desc >=
                                 (diff + pool->start_priority_th)) {
                                move_desc_to_global = diff;
                        }
                        /* Do not have enough descriptors */
                        else if (pool->avail_desc > pool->start_priority_th) {
                                move_desc_to_global = pool->avail_desc -
                                                pool->start_priority_th;
                                overflow_count = diff - move_desc_to_global;
                        }

                        /* Move desc to temp_list */
                        for (i = 0; i < move_desc_to_global; i++) {
                                tx_desc = ol_tx_get_desc_flow_pool(pool);
                                ((union ol_tx_desc_list_elem_t *)tx_desc)->next
                                        = temp_list;
                                temp_list =
                                        (union ol_tx_desc_list_elem_t *)tx_desc;
                        }
                }

                /* update pool size and threshold */
                ol_tx_flow_pool_update(pool, new_pool_size, 0, overflow_count);
                update_done = 1;
        }
        qdf_spin_unlock_bh(&pool->flow_pool_lock);

        if (move_desc_to_global && temp_list) {
                /* put free descriptors to global pool */
                qdf_spin_lock_bh(&pdev->tx_mutex);
                for (i = 0; i < move_desc_to_global; i++) {
                        tx_desc = &temp_list->tx_desc;
                        temp_list = temp_list->next;
                        ol_tx_put_desc_global_pool(pdev, tx_desc);
                }
                qdf_spin_unlock_bh(&pdev->tx_mutex);
        }

        if (update_done)
                goto update_done;

        /* Increase pool size */
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (pool->flow_pool_size < new_pool_size) {
                diff = new_pool_size - pool->flow_pool_size;
                diff += pool->deficient_desc;
                pool->deficient_desc = 0;
                temp = QDF_MIN(pool->overflow_desc, diff);
                pool->overflow_desc -= temp;
                diff -= temp;
        }
        qdf_spin_unlock_bh(&pool->flow_pool_lock);

        if (diff) {
                /* take descriptors from global pool */
                qdf_spin_lock_bh(&pdev->tx_mutex);

                if (pdev->tx_desc.num_free >= diff) {
                        move_desc_from_global = diff;
                } else {
                        move_desc_from_global = pdev->tx_desc.num_free;
                        deficient_count = diff - move_desc_from_global;
                }

                for (i = 0; i < move_desc_from_global; i++) {
                        tx_desc = ol_tx_get_desc_global_pool(pdev);
                        ((union ol_tx_desc_list_elem_t *)tx_desc)->next =
                                                                temp_list;
                        temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
                }
                qdf_spin_unlock_bh(&pdev->tx_mutex);
        }
        /* update desc to pool */
        qdf_spin_lock_bh(&pool->flow_pool_lock);
        if (move_desc_from_global && temp_list) {
                for (i = 0; i < move_desc_from_global; i++) {
                        tx_desc = &temp_list->tx_desc;
                        temp_list = temp_list->next;
                        ol_tx_put_desc_flow_pool(pool, tx_desc);
                }
        }
        /* update pool size and threshold */
        ol_tx_flow_pool_update(pool, new_pool_size, deficient_count, 0);
        qdf_spin_unlock_bh(&pool->flow_pool_lock);

update_done:
        ol_tx_flow_pool_update_queue_state(pdev, pool);
}

/**
 * ol_tx_flow_pool_resize_handler() - Resize pool with new size
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE
 *
 * Return: none
 */
void ol_tx_flow_pool_resize_handler(uint8_t flow_pool_id,
                                    uint16_t flow_pool_size)
{
        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
        struct ol_tx_flow_pool_t *pool;

        ol_txrx_dbg("%s: flow_pool_id %d flow_pool_size %d\n",
                    __func__, flow_pool_id, flow_pool_size);

        if (qdf_unlikely(!pdev)) {
                ol_txrx_err("%s: pdev is NULL", __func__);
                return;
        }
        pdev->pool_stats.pool_resize_count++;

        pool = ol_tx_get_flow_pool(flow_pool_id);
        if (!pool) {
                ol_txrx_err("%s: resize for flow_pool %d size %d failed\n",
                            __func__, flow_pool_id, flow_pool_size);
                return;
        }

        ol_tx_inc_pool_ref(pool);
        ol_tx_flow_pool_resize(pool, flow_pool_size);
        ol_tx_dec_pool_ref(pool, false);
}
#endif

/**
 * ol_txrx_map_to_netif_reason_type() - map to netif_reason_type
 * @reason: network queue pause reason
 *
 * Return: netif_reason_type
 */
static enum netif_reason_type
ol_txrx_map_to_netif_reason_type(uint32_t reason)
{
        switch (reason) {
        case OL_TXQ_PAUSE_REASON_FW:
                return WLAN_FW_PAUSE;
        case OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED:
                return WLAN_PEER_UNAUTHORISED;
        case OL_TXQ_PAUSE_REASON_TX_ABORT:
                return WLAN_TX_ABORT;
        case OL_TXQ_PAUSE_REASON_VDEV_STOP:
                return WLAN_VDEV_STOP;
        case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
                return WLAN_THERMAL_MITIGATION;
        default:
                ol_txrx_err("%s: reason not supported %d\n",
                            __func__, reason);
                return WLAN_REASON_TYPE_MAX;
        }
}

/**
 * ol_txrx_vdev_pause() - pause vdev network queues
 * @pvdev: vdev handle
 * @reason: network queue pause reason
 *
 * Return: none
 */
void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
{
        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
        struct ol_txrx_pdev_t *pdev = vdev->pdev;
        enum netif_reason_type netif_reason;

        if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
                ol_txrx_err("%s: invalid pdev\n", __func__);
                return;
        }

        netif_reason = ol_txrx_map_to_netif_reason_type(reason);
        if (netif_reason == WLAN_REASON_TYPE_MAX)
                return;

        pdev->pause_cb(vdev->vdev_id, WLAN_STOP_ALL_NETIF_QUEUE, netif_reason);
}

/**
 * ol_txrx_vdev_unpause() - unpause vdev network queues
 * @pvdev: vdev handle
 * @reason: network queue pause reason
 *
 * Return: none
 */
void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
{
        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
        struct ol_txrx_pdev_t *pdev = vdev->pdev;
        enum netif_reason_type netif_reason;

        if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
                ol_txrx_err("%s: invalid pdev\n", __func__);
                return;
        }

        netif_reason = ol_txrx_map_to_netif_reason_type(reason);
        if (netif_reason == WLAN_REASON_TYPE_MAX)
                return;

        pdev->pause_cb(vdev->vdev_id, WLAN_WAKE_ALL_NETIF_QUEUE,
                       netif_reason);
}

/**
 * ol_txrx_pdev_pause() - pause network queues for each vdev
 * @pdev: pdev handle
 * @reason: network queue pause reason
 *
 * Return: none
 */
void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
        struct ol_txrx_vdev_t *vdev = NULL, *tmp;

        TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
                ol_txrx_vdev_pause((struct cdp_vdev *)vdev, reason);
        }
}

/**
 * ol_txrx_pdev_unpause() - unpause network queues for each vdev
 * @pdev: pdev handle
 * @reason: network queue pause reason
 *
 * Return: none
 */
void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
{
        struct ol_txrx_vdev_t *vdev = NULL, *tmp;

        TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
                ol_txrx_vdev_unpause((struct cdp_vdev *)vdev, reason);
        }
}