/*
 * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_read, etc. */
#include <qdf_util.h>		/* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>		/* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>	/* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT1 */
#include <ol_tx_desc.h>		/* ol_tx_desc */
#include <ol_tx_send.h>		/* ol_tx_send */
#include <ol_txrx.h>		/* ol_txrx_get_vdev_from_vdev_id */

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>	/* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>	/* OL_TX_ENCAP, etc. */
#include <ol_tx.h>
#include <ol_cfg.h>

#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3
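
/*
 * A deleted pool that still has descriptors outstanding is re-tagged with
 * INVALID_FLOW_ID and kept on the pool list; more than MAX_INVALID_BIN
 * such pools existing at once triggers an assert
 * (see ol_tx_delete_flow_pool()).
 */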

#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
#define TX_FLOW_MGMT_POOL_ID 0xEF
#define TX_FLOW_MGMT_POOL_SIZE 32
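
/*
 * Dedicated global pool for management frames when they are sent over
 * HTT rather than WMI (i.e. when mgmt-over-WMI is disabled); see
 * ol_tx_register_flow_control() below.
 */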

/**
 * ol_tx_register_global_mgmt_pool() - register global pool for mgmt packets
 * @pdev: pdev handle
 *
 * Return: none
 */
static void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	pdev->mgmt_pool = ol_tx_create_flow_pool(TX_FLOW_MGMT_POOL_ID,
						 TX_FLOW_MGMT_POOL_SIZE);
	if (!pdev->mgmt_pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Management pool creation failed\n");
	}
}

/**
 * ol_tx_deregister_global_mgmt_pool() - Deregister global pool for mgmt packets
 * @pdev: pdev handle
 *
 * Return: none
 */
static void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	ol_tx_delete_flow_pool(pdev->mgmt_pool);
}
#else
static inline void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
}
static inline void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
}
#endif

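/*
 * Per-flow (per-vdev) tx descriptor pools are carved out of the global
 * tx descriptor pool, as directed by the target's FLOW_POOL_MAP /
 * FLOW_POOL_UNMAP messages; each pool's start_th/stop_th thresholds then
 * drive tx queue pause/resume decisions for that flow.
 */
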
/**
 * ol_tx_register_flow_control() - Register fw based tx flow control
 * @pdev: pdev handle
 *
 * Return: none
 */
void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_create(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_INIT(&pdev->tx_desc.flow_pool_list);

	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
		ol_tx_register_global_mgmt_pool(pdev);
}

/**
 * ol_tx_deregister_flow_control() - Deregister fw based tx flow control
 * @pdev: pdev handle
 *
 * Return: none
 */
void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
{
	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
		ol_tx_deregister_global_mgmt_pool(pdev);

	qdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
	if (!TAILQ_EMPTY(&pdev->tx_desc.flow_pool_list)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "flow pool list is not empty!\n");
	}
}

/**
 * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 *
 * Return: none
 */
void ol_tx_dump_flow_pool_info(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool = NULL;
	struct ol_tx_flow_pool_t tmp_pool;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev is NULL\n",
			   __func__);
		return;
	}

	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Global Pool\n");
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Total %d :: Available %d\n",
		   pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid flow_pool %d\n",
		   pdev->tx_desc.num_invalid_bin);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "No of pool map received %d\n",
		   pdev->pool_stats.pool_map_count);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "No of pool unmap received %d\n",
		   pdev->pool_stats.pool_unmap_count);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
		   "Pkt dropped due to unavailability of pool %d\n",
		   pdev->pool_stats.pkt_drop_no_pool);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
		   "Pkt dropped due to unavailability of descriptors %d\n",
		   pdev->pool_stats.pkt_drop_no_desc);

	/*
	 * Nested spin locks.
	 * Always take them in the order below:
	 * flow_pool_list_lock -> flow_pool_lock
	 */
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Flow_pool_id %d :: status %d\n",
			   tmp_pool.flow_pool_id, tmp_pool.status);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Total %d :: Available %d :: Deficient %d\n",
			   tmp_pool.flow_pool_size, tmp_pool.avail_desc,
			   tmp_pool.deficient_desc);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Start_TH %d :: Stop_TH %d\n",
			   tmp_pool.start_th, tmp_pool.stop_th);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Member flow_id %d :: flow_type %d\n",
			   tmp_pool.member_flow_id, tmp_pool.flow_type);
		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
}

/**
 * ol_tx_clear_flow_pool_stats() - clear flow pool statistics
 *
 * Return: none
 */
void ol_tx_clear_flow_pool_stats(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev is NULL\n",
			   __func__);
		return;
	}
	qdf_mem_zero(&pdev->pool_stats, sizeof(pdev->pool_stats));
}

/**
 * ol_tx_move_desc_n() - Move n descriptors from src_pool to dst_pool.
 * @src_pool: source pool
 * @dst_pool: destination pool
 * @desc_move_count: descriptor move count
 *
 * Return: number of descriptors actually moved
 */
static int ol_tx_move_desc_n(struct ol_tx_flow_pool_t *src_pool,
			     struct ol_tx_flow_pool_t *dst_pool,
			     int desc_move_count)
{
	uint16_t count = 0, i;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *temp_list = NULL;

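	/*
	 * The move runs in three phases, each under a single pool lock, so
	 * the src and dst flow_pool_locks are never held at the same time
	 * and no ordering rule between peer pool locks is needed.
	 */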
	/* Take descriptors from the source pool and put them in temp_list */
	qdf_spin_lock_bh(&src_pool->flow_pool_lock);
	for (i = 0; i < desc_move_count; i++) {
		tx_desc = ol_tx_get_desc_flow_pool(src_pool);
		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
	}
	qdf_spin_unlock_bh(&src_pool->flow_pool_lock);

	/* Move descriptors from temp_list to the destination pool */
	qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
	for (i = 0; i < desc_move_count; i++) {
		if (dst_pool->deficient_desc)
			dst_pool->deficient_desc--;
		else
			break;
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;
		ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
		count++;
	}
	qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);

	/* If anything is left in temp_list, return it to the source pool */
	qdf_spin_lock_bh(&src_pool->flow_pool_lock);
	while (temp_list) {
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;
		ol_tx_put_desc_flow_pool(src_pool, tx_desc);
	}
	qdf_spin_unlock_bh(&src_pool->flow_pool_lock);

	return count;
}

/**
 * ol_tx_distribute_descs_to_deficient_pools() - Distribute descriptors
 * @src_pool: source pool
 *
 * Distribute all descriptors of the source pool to all
 * deficient pools as per flow_pool_list.
 *
 * Return: 0 for success
 */
int
ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *dst_pool = NULL;
	uint16_t desc_count = src_pool->avail_desc;
	uint16_t desc_move_count = 0;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL\n", __func__);
		return -EINVAL;
	}
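
	/*
	 * Walk the pool list and top up each deficient pool until the
	 * source pool runs dry. The dst pool's lock is dropped across
	 * ol_tx_move_desc_n(), which takes the per-pool locks itself.
	 */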
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
		if (dst_pool->deficient_desc) {
			desc_move_count =
				(dst_pool->deficient_desc > desc_count) ?
					desc_count : dst_pool->deficient_desc;
			qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
			desc_move_count = ol_tx_move_desc_n(src_pool,
						dst_pool, desc_move_count);
			desc_count -= desc_move_count;
			qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
		}
		qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
		if (desc_count == 0)
			break;
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return 0;
}

/**
 * ol_tx_create_flow_pool() - create flow pool
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
						 uint16_t flow_pool_size)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	uint16_t size = 0, i;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *temp_list = NULL;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL\n", __func__);
		return NULL;
	}

	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
	start_threshold = stop_threshold +
		ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);

	pool = qdf_mem_malloc(sizeof(*pool));
	if (!pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: malloc failed\n", __func__);
		return NULL;
	}

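	/*
	 * start_th/stop_th are configured as percentages of the pool size,
	 * so that the descriptor alloc/free paths can pause the flow when
	 * avail_desc falls to stop_th and resume it once avail_desc
	 * recovers past start_th.
	 */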
	pool->flow_pool_id = flow_pool_id;
	pool->flow_pool_size = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	pool->start_th = (start_threshold * flow_pool_size) / 100;
	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
	qdf_spinlock_create(&pool->flow_pool_lock);

	/* Take tx descriptors from the global pool and put them in temp_list */
	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.num_free >= pool->flow_pool_size)
		size = pool->flow_pool_size;
	else
		size = pdev->tx_desc.num_free;

	for (i = 0; i < size; i++) {
		tx_desc = ol_tx_get_desc_global_pool(pdev);
		tx_desc->pool = pool;
		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	/* Hand temp_list over to the flow_pool as its freelist */
	pool->freelist = temp_list;
	pool->avail_desc = size;
	pool->deficient_desc = pool->flow_pool_size - pool->avail_desc;

	/* Add flow_pool to flow_pool_list */
	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
			  flow_pool_list_elem);
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return pool;
}

/**
 * ol_tx_delete_flow_pool() - delete flow pool
 * @pool: flow pool pointer
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 *
 * Return: 0 for success or error
 */
int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	uint16_t i, size;
	union ol_tx_desc_list_elem_t *temp_list = NULL;
	struct ol_tx_desc_t *tx_desc = NULL;

	if (!pdev || !pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pool/pdev is NULL\n", __func__);
		return -EINVAL;
	}

	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool, flow_pool_list_elem);
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (pool->avail_desc == pool->flow_pool_size)
		pool->status = FLOW_POOL_INACTIVE;
	else
		pool->status = FLOW_POOL_INVALID;

	/* Take all free descriptors and put them in temp_list */
	temp_list = pool->freelist;
	size = pool->avail_desc;
	pool->freelist = NULL;
	pool->avail_desc = 0;

	if (pool->status == FLOW_POOL_INACTIVE) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* Free flow_pool */
		qdf_spinlock_destroy(&pool->flow_pool_lock);
		qdf_mem_free(pool);
	} else { /* FLOW_POOL_INVALID case */
		pool->flow_pool_size -= size;
		pool->flow_pool_id = INVALID_FLOW_ID;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);

		pdev->tx_desc.num_invalid_bin++;
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid pool created %d\n",
			   __func__, pdev->tx_desc.num_invalid_bin);
		if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
			ASSERT(0);

		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
		TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
				  flow_pool_list_elem);
		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
	}

	/* Return the free descriptors to the global pool */
	qdf_spin_lock_bh(&pdev->tx_mutex);
	for (i = 0; i < size; i++) {
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;
		ol_tx_put_desc_global_pool(pdev, tx_desc);
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	return 0;
}

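/*
 * ol_tx_free_invalid_flow_pool() is the teardown path for a pool left in
 * FLOW_POOL_INVALID state: it hands whatever descriptors have been
 * returned so far to other deficient pools, shrinks the pool to its
 * currently available count, and then deletes it.
 */
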
/**
 * ol_tx_free_invalid_flow_pool() - free invalid pool
 * @pool: pool
 *
 * Return: 0 for success or error
 */
int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if ((!pdev) || (!pool) || (pool->status != FLOW_POOL_INVALID)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: Invalid pool/pdev\n", __func__);
		return -EINVAL;
	}

	/* directly distribute to other deficient pools */
	ol_tx_distribute_descs_to_deficient_pools(pool);

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->flow_pool_size = pool->avail_desc;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	pdev->tx_desc.num_invalid_bin--;
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
		   "%s: invalid pool deleted %d\n",
		   __func__, pdev->tx_desc.num_invalid_bin);

	return ol_tx_delete_flow_pool(pool);
}

/**
 * ol_tx_get_flow_pool() - get flow_pool from flow_pool_id
 * @flow_pool_id: flow pool id
 *
 * Return: flow_pool ptr / NULL if not found
 */
struct ol_tx_flow_pool_t *ol_tx_get_flow_pool(uint8_t flow_pool_id)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool = NULL;
	bool is_found = false;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL\n", __func__);
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->flow_pool_id == flow_pool_id) {
			qdf_spin_unlock_bh(&pool->flow_pool_lock);
			is_found = true;
			break;
		}
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
	}
	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	if (!is_found)
		pool = NULL;

	return pool;
}

/**
 * ol_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
void ol_tx_flow_pool_vdev_map(struct ol_tx_flow_pool_t *pool,
			      uint8_t vdev_id)
{
	ol_txrx_vdev_handle vdev;

	vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
	if (!vdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid vdev_id %d\n",
			   __func__, vdev_id);
		return;
	}

	vdev->pool = pool;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->member_flow_id = vdev_id;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

/**
 * ol_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
void ol_tx_flow_pool_vdev_unmap(struct ol_tx_flow_pool_t *pool,
				uint8_t vdev_id)
{
	ol_txrx_vdev_handle vdev;

	vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
	if (!vdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid vdev_id %d\n",
			   __func__, vdev_id);
		return;
	}

	vdev->pool = NULL;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->member_flow_id = INVALID_FLOW_ID;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

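/*
 * The two handlers below service the target-to-host flow pool control
 * messages (HTT_T2H_MSG_TYPE_FLOW_POOL_MAP / _UNMAP), typically issued
 * by the target when a flow (vdev) is created or torn down.
 */
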
/**
 * ol_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process the target-to-host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP.
 *
 * Return: none
 */
void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
				 uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	uint8_t pool_create = 0;
	enum htt_flow_type type = flow_type;

	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
		   "%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
		   __func__, flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (qdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL", __func__);
		return;
	}
	pdev->pool_stats.pool_map_count++;

	pool = ol_tx_get_flow_pool(flow_pool_id);
	if (!pool) {
		pool = ol_tx_create_flow_pool(flow_pool_id, flow_pool_size);
		if (!pool) {
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "%s: creation of flow_pool %d size %d failed\n",
				   __func__, flow_pool_id, flow_pool_size);
			return;
		}
		pool_create = 1;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		ol_tx_flow_pool_vdev_map(pool, flow_id);
		break;
	default:
		if (pool_create)
			ol_tx_delete_flow_pool(pool);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: flow type %d not supported\n",
			   __func__, type);
		break;
	}
}

/**
 * ol_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process the target-to-host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP.
 *
 * Return: none
 */
void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
				   uint8_t flow_pool_id)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	enum htt_flow_type type = flow_type;

	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
		   "%s: flow_id %d flow_type %d flow_pool_id %d\n",
		   __func__, flow_id, flow_type, flow_pool_id);

	if (qdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL", __func__);
		return;
	}
	pdev->pool_stats.pool_unmap_count++;

	pool = ol_tx_get_flow_pool(flow_pool_id);
	if (!pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: flow_pool not available flow_pool_id %d\n",
			   __func__, flow_pool_id);
		return;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		ol_tx_flow_pool_vdev_unmap(pool, flow_id);
		break;
	default:
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: flow type %d not supported\n",
			   __func__, type);
		return;
	}

	/* only delete if all descriptors are available */
	ol_tx_delete_flow_pool(pool);
}