/*
 * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
28/* OS abstraction libraries */
Nirav Shahcbc6d722016-03-01 16:24:53 +053029#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
Anurag Chouhanf04e84f2016-03-03 10:12:12 +053030#include <qdf_atomic.h> /* qdf_atomic_read, etc. */
Anurag Chouhanc5548422016-02-24 18:33:27 +053031#include <qdf_util.h> /* qdf_unlikely */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080032
33/* APIs for other modules */
34#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
35#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080036
37/* internal header files relevant for all systems */
38#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080039#include <ol_tx_desc.h> /* ol_tx_desc */
40#include <ol_tx_send.h> /* ol_tx_send */
Dhanashri Atre12a08392016-02-17 13:10:34 -080041#include <ol_txrx.h> /* ol_txrx_get_vdev_from_vdev_id */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080042
43/* internal header files relevant only for HL systems */
44#include <ol_tx_queue.h> /* ol_tx_enqueue */
45
46/* internal header files relevant only for specific systems (Pronto) */
47#include <ol_txrx_encap.h> /* OL_TX_ENCAP, etc */
48#include <ol_tx.h>
49#include <ol_cfg.h>
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080050#include <cdp_txrx_handle.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080051#define INVALID_FLOW_ID 0xFF
52#define MAX_INVALID_BIN 3
53
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
#define TX_FLOW_MGMT_POOL_ID 0xEF
#define TX_FLOW_MGMT_POOL_SIZE 32

/**
 * ol_tx_register_global_mgmt_pool() - register global pool for mgmt packets
 * @pdev: pdev handler
 *
 * Carves out a dedicated descriptor pool for management frames so they
 * are not starved by data-path flow pools.
 *
 * Return: none
 */
static void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	pdev->mgmt_pool = ol_tx_create_flow_pool(TX_FLOW_MGMT_POOL_ID,
						 TX_FLOW_MGMT_POOL_SIZE);
	if (pdev->mgmt_pool == NULL)
		ol_txrx_err("Management pool creation failed\n");
}

/**
 * ol_tx_deregister_global_mgmt_pool() - Deregister global pool for mgmt packets
 * @pdev: pdev handler
 *
 * Drops the reference taken at creation time; the pool is deleted once
 * the reference count reaches zero.
 *
 * Return: none
 */
static void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	ol_tx_dec_pool_ref(pdev->mgmt_pool, false);
}
#else
/* Stubs when the dedicated management pool feature is compiled out. */
static inline void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
}
#endif
94
95/**
96 * ol_tx_register_flow_control() - Register fw based tx flow control
97 * @pdev: pdev handle
98 *
99 * Return: none
100 */
101void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
102{
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530103 qdf_spinlock_create(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800104 TAILQ_INIT(&pdev->tx_desc.flow_pool_list);
105
Nirav Shah22bf44d2015-12-10 15:39:48 +0530106 if (!ol_tx_get_is_mgmt_over_wmi_enabled())
107 ol_tx_register_global_mgmt_pool(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800108}
109
/**
 * ol_tx_deregister_flow_control() - Deregister fw based tx flow control
 * @pdev: pdev handle
 *
 * Drops the global management pool (when mgmt frames are not sent over
 * WMI), force-releases every flow pool still on the pdev list, then
 * destroys the list lock.
 *
 * Return: none
 */
void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
{
	int i = 0;
	struct ol_tx_flow_pool_t *pool = NULL;

	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
		ol_tx_deregister_global_mgmt_pool(pdev);

	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	while (!TAILQ_EMPTY(&pdev->tx_desc.flow_pool_list)) {
		pool = TAILQ_FIRST(&pdev->tx_desc.flow_pool_list);
		if (!pool)
			break;
		/*
		 * Drop the list lock before freeing: ol_tx_dec_pool_ref()
		 * acquires flow_pool_list_lock itself, so holding it here
		 * would deadlock.
		 */
		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
		ol_txrx_info("flow pool list is not empty %d!!!\n", i++);

		/* dump diagnostics only for the first leftover pool */
		if (i == 1)
			ol_tx_dump_flow_pool_info((void *)pdev);

		/* force == true: reclaim descriptors even if still in use */
		ol_tx_dec_pool_ref(pool, true);
		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
	qdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
}
141
/**
 * ol_tx_delete_flow_pool() - delete flow pool
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * global pool.
 *
 * Return: 0 for success or error
 */
static int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool, bool force)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	uint16_t i, size;
	union ol_tx_desc_list_elem_t *temp_list = NULL;
	struct ol_tx_desc_t *tx_desc = NULL;

	if (!pool) {
		ol_txrx_err(
			"%s: pool is NULL\n", __func__);
		QDF_ASSERT(0);
		return -ENOMEM;
	}
	if (!pdev) {
		ol_txrx_err(
			"%s: pdev is NULL\n", __func__);
		QDF_ASSERT(0);
		return -ENOMEM;
	}

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	/*
	 * INACTIVE: every descriptor is back (or caller forces deletion),
	 * so the pool can be freed now. Otherwise mark INVALID and keep
	 * it around until the in-flight descriptors return.
	 */
	if (pool->avail_desc == pool->flow_pool_size || force == true)
		pool->status = FLOW_POOL_INACTIVE;
	else
		pool->status = FLOW_POOL_INVALID;

	/* Take all free descriptors and put it in temp_list */
	temp_list = pool->freelist;
	size = pool->avail_desc;
	pool->freelist = NULL;
	pool->avail_desc = 0;

	if (pool->status == FLOW_POOL_INACTIVE) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* Free flow_pool */
		qdf_spinlock_destroy(&pool->flow_pool_lock);
		qdf_mem_free(pool);
	} else { /* FLOW_POOL_INVALID case*/
		/* shrink to just the outstanding descriptors */
		pool->flow_pool_size -= size;
		pool->flow_pool_id = INVALID_FLOW_ID;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* keep the pool alive while it is on the invalid list */
		ol_tx_inc_pool_ref(pool);

		pdev->tx_desc.num_invalid_bin++;
		ol_txrx_info(
			"%s: invalid pool created %d\n",
			__func__, pdev->tx_desc.num_invalid_bin);
		/* more than MAX_INVALID_BIN leaked pools indicates a bug */
		if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
			ASSERT(0);

		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
		TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
				  flow_pool_list_elem);
		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
	}

	/* put free descriptors to global pool */
	qdf_spin_lock_bh(&pdev->tx_mutex);
	for (i = 0; i < size; i++) {
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;

		ol_tx_put_desc_global_pool(pdev, tx_desc);
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	return 0;
}
222
223QDF_STATUS ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool)
224{
225 if (!pool) {
226 ol_txrx_err("flow pool is NULL");
227 return QDF_STATUS_E_INVAL;
228 }
229
230 qdf_spin_lock_bh(&pool->flow_pool_lock);
231 qdf_atomic_inc(&pool->ref_cnt);
232 qdf_spin_unlock_bh(&pool->flow_pool_lock);
233 ol_txrx_dbg("pool %p, ref_cnt %x",
234 pool, qdf_atomic_read(&pool->ref_cnt));
235
236 return QDF_STATUS_SUCCESS;
237}
238
/**
 * ol_tx_dec_pool_ref() - release a reference on a flow pool
 * @pool: flow pool pointer
 * @force: passed through to ol_tx_delete_flow_pool() on last reference
 *
 * When the reference count drops to zero the pool is unlinked from the
 * pdev list and deleted.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL on NULL pool/pdev
 */
QDF_STATUS ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pool) {
		ol_txrx_err("flow pool is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}

	/* nested lock order: flow_pool_list_lock -> flow_pool_lock */
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (qdf_atomic_dec_and_test(&pool->ref_cnt)) {
		/* last reference: unlink while still holding the list lock */
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool,
			     flow_pool_list_elem);
		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
		ol_txrx_dbg("Deleting pool %p", pool);
		ol_tx_delete_flow_pool(pool, force);
	} else {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
		ol_txrx_dbg("pool %p, ref_cnt %x",
			    pool, qdf_atomic_read(&pool->ref_cnt));
	}

	return QDF_STATUS_SUCCESS;
}
273
/**
 * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 * @ctx: pdev handle (passed as void * for the cdp ops table)
 *
 * Logs global descriptor counters, then walks the flow-pool list and
 * logs a snapshot of each pool. A reference is held on the pool being
 * logged so it cannot be deleted while the list lock is dropped.
 *
 * Return: none
 */
void ol_tx_dump_flow_pool_info(void *ctx)
{
	struct ol_txrx_pdev_t *pdev = ctx;
	struct ol_tx_flow_pool_t *pool = NULL, *pool_prev = NULL;
	struct ol_tx_flow_pool_t tmp_pool;

	ol_txrx_info("Global Pool");
	if (!pdev) {
		ol_txrx_err("ERROR: pdev NULL");
		QDF_ASSERT(0); /* traceback */
		return;
	}
	ol_txrx_info("Total %d :: Available %d",
		     pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
	ol_txrx_info("Invalid flow_pool %d",
		     pdev->tx_desc.num_invalid_bin);
	ol_txrx_info("No of pool map received %d",
		     pdev->pool_stats.pool_map_count);
	ol_txrx_info("No of pool unmap received %d",
		     pdev->pool_stats.pool_unmap_count);
	ol_txrx_info(
		"Pkt dropped due to unavailablity of pool %d",
		pdev->pool_stats.pkt_drop_no_pool);

	/*
	 * Nested spin lock.
	 * Always take in below order.
	 * flow_pool_list_lock -> flow_pool_lock
	 */
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		/* pin the current pool, snapshot it, then log lock-free */
		ol_tx_inc_pool_ref(pool);
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

		/* previous pool's reference can be dropped only after the
		 * list lock is released (dec may delete + relock the list)
		 */
		if (pool_prev)
			ol_tx_dec_pool_ref(pool_prev, false);

		ol_txrx_info("\n");
		ol_txrx_info(
			"Flow_pool_id %d :: status %d",
			tmp_pool.flow_pool_id, tmp_pool.status);
		ol_txrx_info(
			"Total %d :: Available %d :: Deficient %d",
			tmp_pool.flow_pool_size, tmp_pool.avail_desc,
			tmp_pool.deficient_desc);
		ol_txrx_info(
			"Start threshold %d :: Stop threshold %d",
			tmp_pool.start_th, tmp_pool.stop_th);
		ol_txrx_info(
			"Member flow_id %d :: flow_type %d",
			tmp_pool.member_flow_id, tmp_pool.flow_type);
		ol_txrx_info(
			"Pkt dropped due to unavailablity of descriptors %d",
			tmp_pool.pkt_drop_no_desc);

		pool_prev = pool;
		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	/* decrement ref count for last pool in list */
	if (pool_prev)
		ol_tx_dec_pool_ref(pool_prev, false);

}
348
349/**
350 * ol_tx_clear_flow_pool_stats() - clear flow pool statistics
351 *
352 * Return: none
353 */
354void ol_tx_clear_flow_pool_stats(void)
355{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530356 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800357
358 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530359 ol_txrx_err("%s: pdev is null\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800360 __func__);
361 return;
362 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530363 qdf_mem_zero(&pdev->pool_stats, sizeof(pdev->pool_stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800364}
365
/**
 * ol_tx_move_desc_n() - Move n descriptors from src_pool to dst_pool.
 * @src_pool: source pool
 * @dst_pool: destination pool
 * @desc_move_count: descriptor move count
 *
 * NOTE(review): the first loop assumes src_pool holds at least
 * @desc_move_count free descriptors; ol_tx_get_desc_flow_pool()'s
 * behavior on an empty pool is not visible here — confirm callers
 * bound desc_move_count by src_pool->avail_desc.
 *
 * Return: actual descriptors moved
 */
static int ol_tx_move_desc_n(struct ol_tx_flow_pool_t *src_pool,
			     struct ol_tx_flow_pool_t *dst_pool,
			     int desc_move_count)
{
	uint16_t count = 0, i;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *temp_list = NULL;

	/* Take descriptors from source pool and put it in temp_list */
	qdf_spin_lock_bh(&src_pool->flow_pool_lock);
	for (i = 0; i < desc_move_count; i++) {
		tx_desc = ol_tx_get_desc_flow_pool(src_pool);
		/* prepend to the singly linked temp_list */
		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;

	}
	qdf_spin_unlock_bh(&src_pool->flow_pool_lock);

	/* Take descriptors from temp_list and put it in destination pool */
	qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
	for (i = 0; i < desc_move_count; i++) {
		/* stop once the destination is no longer deficient */
		if (dst_pool->deficient_desc)
			dst_pool->deficient_desc--;
		else
			break;
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;
		ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
		count++;
	}
	qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);

	/* If anything is there in temp_list put it back to source pool */
	qdf_spin_lock_bh(&src_pool->flow_pool_lock);
	while (temp_list) {
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;
		ol_tx_put_desc_flow_pool(src_pool, tx_desc);
	}
	qdf_spin_unlock_bh(&src_pool->flow_pool_lock);

	return count;
}
417
418
/**
 * ol_tx_distribute_descs_to_deficient_pools() - Distribute descriptors
 * @src_pool: source pool
 *
 * Distribute all descriptors of source pool to all
 * deficient pools as per flow_pool_list. A destination pool that was
 * paused is un-paused (netif queues woken) once it rises above its
 * start threshold.
 *
 * Return: 0 for sucess
 */
static int
ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *dst_pool = NULL;
	uint16_t desc_count = src_pool->avail_desc;
	uint16_t desc_move_count = 0;

	if (!pdev) {
		ol_txrx_err(
			"%s: pdev is NULL\n", __func__);
		return -EINVAL;
	}
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
		if (dst_pool->deficient_desc) {
			/* move at most what the source still has left */
			desc_move_count =
				(dst_pool->deficient_desc > desc_count) ?
					desc_count : dst_pool->deficient_desc;
			/* drop dst lock: ol_tx_move_desc_n locks both pools */
			qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
			desc_move_count = ol_tx_move_desc_n(src_pool,
						dst_pool, desc_move_count);
			desc_count -= desc_move_count;

			qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
			if (dst_pool->status == FLOW_POOL_ACTIVE_PAUSED) {
				if (dst_pool->avail_desc > dst_pool->start_th) {
					/* refilled above start threshold:
					 * wake the netif queues */
					pdev->pause_cb(dst_pool->member_flow_id,
						WLAN_WAKE_ALL_NETIF_QUEUE,
						WLAN_DATA_FLOW_CONTROL);
					dst_pool->status =
						FLOW_POOL_ACTIVE_UNPAUSED;
				}
			}
		}
		qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
		if (desc_count == 0)
			break;
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return 0;
}
473
474
/**
 * ol_tx_create_flow_pool() - create flow pool
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Allocates a pool, takes the initial reference, pulls up to
 * @flow_pool_size descriptors from the global pool (fewer if the global
 * pool is short — the remainder is recorded in deficient_desc), and
 * links the pool onto the pdev flow-pool list.
 *
 * Return: flow_pool pointer / NULL for error
 */
struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
						 uint16_t flow_pool_size)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	uint16_t size = 0, i;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *temp_list = NULL;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (!pdev) {
		ol_txrx_err(
			"%s: pdev is NULL\n", __func__);
		return NULL;
	}
	/* thresholds are configured as percentages of the pool size */
	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
	start_threshold = stop_threshold +
		ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
	pool = qdf_mem_malloc(sizeof(*pool));
	if (!pool) {
		ol_txrx_err(
			"%s: malloc failed\n", __func__);
		return NULL;
	}

	pool->flow_pool_id = flow_pool_id;
	pool->flow_pool_size = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	pool->start_th = (start_threshold * flow_pool_size)/100;
	pool->stop_th = (stop_threshold * flow_pool_size)/100;
	qdf_spinlock_create(&pool->flow_pool_lock);
	qdf_atomic_init(&pool->ref_cnt);
	/* initial reference, released by ol_tx_dec_pool_ref() */
	ol_tx_inc_pool_ref(pool);

	/* Take TX descriptor from global_pool and put it in temp_list*/
	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.num_free >= pool->flow_pool_size)
		size = pool->flow_pool_size;
	else
		size = pdev->tx_desc.num_free;

	for (i = 0; i < size; i++) {
		tx_desc = ol_tx_get_desc_global_pool(pdev);
		tx_desc->pool = pool;
		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;

	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	/* put temp_list to flow_pool */
	pool->freelist = temp_list;
	pool->avail_desc = size;
	pool->deficient_desc = pool->flow_pool_size - pool->avail_desc;

	/* Add flow_pool to flow_pool_list */
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
			  flow_pool_list_elem);
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return pool;
}
546
547/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800548 * ol_tx_free_invalid_flow_pool() - free invalid pool
549 * @pool: pool
550 *
551 * Return: 0 for success or failure
552 */
553int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
554{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530555 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800556
557 if ((!pdev) || (!pool) || (pool->status != FLOW_POOL_INVALID)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530558 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800559 "%s: Invalid pool/pdev\n", __func__);
560 return -EINVAL;
561 }
562
563 /* direclty distribute to other deficient pools */
564 ol_tx_distribute_descs_to_deficient_pools(pool);
565
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530566 qdf_spin_lock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800567 pool->flow_pool_size = pool->avail_desc;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530568 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800569
570 pdev->tx_desc.num_invalid_bin--;
Poddar, Siddarth14521792017-03-14 21:19:42 +0530571 ol_txrx_info(
Nirav Shah2ae038d2015-12-23 20:36:11 +0530572 "%s: invalid pool deleted %d\n",
573 __func__, pdev->tx_desc.num_invalid_bin);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800574
Himanshu Agarwal7d367c12017-03-30 17:16:55 +0530575 return ol_tx_dec_pool_ref(pool, false);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800576}
577
578/**
579 * ol_tx_get_flow_pool() - get flow_pool from flow_pool_id
580 * @flow_pool_id: flow pool id
581 *
582 * Return: flow_pool ptr / NULL if not found
583 */
Jeff Johnsonf89f58f2016-10-14 09:58:29 -0700584static struct ol_tx_flow_pool_t *ol_tx_get_flow_pool(uint8_t flow_pool_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800585{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530586 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800587 struct ol_tx_flow_pool_t *pool = NULL;
588 bool is_found = false;
589
Himanshu Agarwalf03f8102016-09-08 18:54:47 +0530590 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530591 ol_txrx_err("ERROR: pdev NULL");
Himanshu Agarwalf03f8102016-09-08 18:54:47 +0530592 QDF_ASSERT(0); /* traceback */
593 return NULL;
594 }
595
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530596 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800597 TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
598 flow_pool_list_elem) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530599 qdf_spin_lock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800600 if (pool->flow_pool_id == flow_pool_id) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530601 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800602 is_found = true;
603 break;
604 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530605 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800606 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530607 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800608
609 if (is_found == false)
610 pool = NULL;
611
612 return pool;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800613}
614
615
616/**
617 * ol_tx_flow_pool_vdev_map() - Map flow_pool with vdev
618 * @pool: flow_pool
619 * @vdev_id: flow_id /vdev_id
620 *
621 * Return: none
622 */
Jeff Johnsonf89f58f2016-10-14 09:58:29 -0700623static void ol_tx_flow_pool_vdev_map(struct ol_tx_flow_pool_t *pool,
624 uint8_t vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800625{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800626 struct ol_txrx_vdev_t *vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800627
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800628 vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800629 if (!vdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530630 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800631 "%s: invalid vdev_id %d\n",
632 __func__, vdev_id);
633 return;
634 }
635
636 vdev->pool = pool;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530637 qdf_spin_lock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800638 pool->member_flow_id = vdev_id;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530639 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800640}
641
642/**
643 * ol_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
644 * @pool: flow_pool
645 * @vdev_id: flow_id /vdev_id
646 *
647 * Return: none
648 */
Jeff Johnsonf89f58f2016-10-14 09:58:29 -0700649static void ol_tx_flow_pool_vdev_unmap(struct ol_tx_flow_pool_t *pool,
650 uint8_t vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800651{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800652 struct ol_txrx_vdev_t *vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800653
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800654 vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800655 if (!vdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530656 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800657 "%s: invalid vdev_id %d\n",
658 __func__, vdev_id);
659 return;
660 }
661
662 vdev->pool = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530663 qdf_spin_lock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800664 pool->member_flow_id = INVALID_FLOW_ID;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530665 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800666}
667
/**
 * ol_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: none
 */
void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
				 uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	uint8_t pool_create = 0;
	enum htt_flow_type type = flow_type;

	ol_txrx_dbg(
		"%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
		__func__, flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (qdf_unlikely(!pdev)) {
		ol_txrx_err(
			"%s: pdev is NULL", __func__);
		return;
	}
	pdev->pool_stats.pool_map_count++;

	/* reuse an existing pool with this id, otherwise create one */
	pool = ol_tx_get_flow_pool(flow_pool_id);
	if (!pool) {
		pool = ol_tx_create_flow_pool(flow_pool_id, flow_pool_size);
		if (pool == NULL) {
			ol_txrx_err(
				"%s: creation of flow_pool %d size %d failed\n",
				__func__, flow_pool_id, flow_pool_size);
			return;
		}
		pool_create = 1;
	}

	switch (type) {

	case FLOW_TYPE_VDEV:
		ol_tx_flow_pool_vdev_map(pool, flow_id);
		/* the flow is backed by a pool now: wake its queues */
		pdev->pause_cb(flow_id,
			       WLAN_WAKE_ALL_NETIF_QUEUE,
			       WLAN_DATA_FLOW_CONTROL);
		break;
	default:
		/* unsupported type: undo the creation reference, if any */
		if (pool_create)
			ol_tx_dec_pool_ref(pool, false);
		ol_txrx_err(
			"%s: flow type %d not supported !!!\n",
			__func__, type);
		break;
	}
}
728
729/**
730 * ol_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
731 * @flow_id: flow id
732 * @flow_type: flow type
733 * @flow_pool_id: pool id
734 *
735 * Process below target to host message
736 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
737 *
738 * Return: none
739 */
740void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
741 uint8_t flow_pool_id)
742{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530743 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800744 struct ol_tx_flow_pool_t *pool;
745 enum htt_flow_type type = flow_type;
746
Poddar, Siddarth14521792017-03-14 21:19:42 +0530747 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800748 "%s: flow_id %d flow_type %d flow_pool_id %d\n",
749 __func__, flow_id, flow_type, flow_pool_id);
750
Anurag Chouhanc5548422016-02-24 18:33:27 +0530751 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530752 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800753 "%s: pdev is NULL", __func__);
754 return;
755 }
756 pdev->pool_stats.pool_unmap_count++;
757
758 pool = ol_tx_get_flow_pool(flow_pool_id);
759 if (!pool) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530760 ol_txrx_info(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800761 "%s: flow_pool not available flow_pool_id %d\n",
762 __func__, type);
763 return;
764 }
765
766 switch (type) {
767
768 case FLOW_TYPE_VDEV:
769 ol_tx_flow_pool_vdev_unmap(pool, flow_id);
770 break;
771 default:
Poddar, Siddarth14521792017-03-14 21:19:42 +0530772 ol_txrx_info(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800773 "%s: flow type %d not supported !!!\n",
774 __func__, type);
775 return;
776 }
777
Himanshu Agarwal7d367c12017-03-30 17:16:55 +0530778 /*
779 * only delete if all descriptors are available
780 * and pool ref count becomes 0
781 */
782 ol_tx_dec_pool_ref(pool, false);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800783}
784
785