blob: 53a8fad0333b37556aac74b9515931e7c3a6ca6a [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002 * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/* OS abstraction libraries */
Nirav Shahcbc6d722016-03-01 16:24:53 +053029#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
Anurag Chouhanf04e84f2016-03-03 10:12:12 +053030#include <qdf_atomic.h> /* qdf_atomic_read, etc. */
Anurag Chouhanc5548422016-02-24 18:33:27 +053031#include <qdf_util.h> /* qdf_unlikely */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080032
33/* APIs for other modules */
34#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
35#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080036
37/* internal header files relevant for all systems */
38#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080039#include <ol_tx_desc.h> /* ol_tx_desc */
40#include <ol_tx_send.h> /* ol_tx_send */
Dhanashri Atre12a08392016-02-17 13:10:34 -080041#include <ol_txrx.h> /* ol_txrx_get_vdev_from_vdev_id */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080042
43/* internal header files relevant only for HL systems */
44#include <ol_tx_queue.h> /* ol_tx_enqueue */
45
46/* internal header files relevant only for specific systems (Pronto) */
47#include <ol_txrx_encap.h> /* OL_TX_ENCAP, etc */
48#include <ol_tx.h>
49#include <ol_cfg.h>
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080050#include <cdp_txrx_handle.h>
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080051#define INVALID_FLOW_ID 0xFF
52#define MAX_INVALID_BIN 3
53
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
#define TX_FLOW_MGMT_POOL_ID 0xEF
#define TX_FLOW_MGMT_POOL_SIZE 32

/**
 * ol_tx_register_global_mgmt_pool() - register global pool for mgmt packets
 * @pdev: pdev handler
 *
 * Creates the dedicated descriptor pool used for management frames and
 * stashes it in pdev->mgmt_pool. Only logs on failure; callers treat a
 * NULL mgmt_pool as "no dedicated mgmt pool".
 *
 * Return: none
 */
static void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	pdev->mgmt_pool = ol_tx_create_flow_pool(TX_FLOW_MGMT_POOL_ID,
						 TX_FLOW_MGMT_POOL_SIZE);
	if (!pdev->mgmt_pool)
		ol_txrx_err("Management pool creation failed\n");
}

/**
 * ol_tx_deregister_global_mgmt_pool() - Deregister global pool for mgmt packets
 * @pdev: pdev handler
 *
 * Tears down the dedicated management-frame descriptor pool
 * (non-forced delete: descriptors still in flight keep the pool in
 * the invalid state until they drain).
 *
 * Return: none
 */
static void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	ol_tx_delete_flow_pool(pdev->mgmt_pool, false);
}
#else
/* Stubs when the dedicated global mgmt pool is not compiled in. */
static inline void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
}

static inline void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
}
#endif
99
100/**
101 * ol_tx_register_flow_control() - Register fw based tx flow control
102 * @pdev: pdev handle
103 *
104 * Return: none
105 */
106void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
107{
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530108 qdf_spinlock_create(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800109 TAILQ_INIT(&pdev->tx_desc.flow_pool_list);
110
Nirav Shah22bf44d2015-12-10 15:39:48 +0530111 if (!ol_tx_get_is_mgmt_over_wmi_enabled())
112 ol_tx_register_global_mgmt_pool(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800113}
114
115/**
116 * ol_tx_deregister_flow_control() - Deregister fw based tx flow control
117 * @pdev: pdev handle
118 *
119 * Return: none
120 */
121void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
122{
Nirav Shah7a0a9052016-04-14 16:52:21 +0530123 int i = 0;
124 struct ol_tx_flow_pool_t *pool = NULL;
125
Nirav Shah22bf44d2015-12-10 15:39:48 +0530126 if (!ol_tx_get_is_mgmt_over_wmi_enabled())
127 ol_tx_deregister_global_mgmt_pool(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800128
Nirav Shah7a0a9052016-04-14 16:52:21 +0530129 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
130 while (!TAILQ_EMPTY(&pdev->tx_desc.flow_pool_list)) {
131 pool = TAILQ_FIRST(&pdev->tx_desc.flow_pool_list);
132 if (!pool)
133 break;
134 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530135 ol_txrx_info("flow pool list is not empty %d!!!\n", i++);
Nirav Shah7a0a9052016-04-14 16:52:21 +0530136 if (i == 1)
137 ol_tx_dump_flow_pool_info();
138 ol_tx_delete_flow_pool(pool, true);
139 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800140 }
Nirav Shah7a0a9052016-04-14 16:52:21 +0530141 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
142 qdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800143}
144
145/**
146 * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
147 *
148 * Return: none
149 */
150void ol_tx_dump_flow_pool_info(void)
151{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530152 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800153 struct ol_tx_flow_pool_t *pool = NULL;
154 struct ol_tx_flow_pool_t tmp_pool;
155
156
Poddar, Siddarth14521792017-03-14 21:19:42 +0530157 ol_txrx_info("Global Pool");
Himanshu Agarwalf03f8102016-09-08 18:54:47 +0530158 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530159 ol_txrx_err("ERROR: pdev NULL");
Himanshu Agarwalf03f8102016-09-08 18:54:47 +0530160 QDF_ASSERT(0); /* traceback */
161 return;
162 }
Poddar, Siddarth14521792017-03-14 21:19:42 +0530163 ol_txrx_info("Total %d :: Available %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800164 pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530165 ol_txrx_info("Invalid flow_pool %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800166 pdev->tx_desc.num_invalid_bin);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530167 ol_txrx_info("No of pool map received %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800168 pdev->pool_stats.pool_map_count);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530169 ol_txrx_info("No of pool unmap received %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800170 pdev->pool_stats.pool_unmap_count);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530171 ol_txrx_info(
Nirav Shahda008342016-05-17 18:50:40 +0530172 "Pkt dropped due to unavailablity of pool %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800173 pdev->pool_stats.pkt_drop_no_pool);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800174
175 /*
176 * Nested spin lock.
177 * Always take in below order.
178 * flow_pool_list_lock -> flow_pool_lock
179 */
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530180 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800181 TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
182 flow_pool_list_elem) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530183 qdf_spin_lock_bh(&pool->flow_pool_lock);
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530184 qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530185 qdf_spin_unlock_bh(&pool->flow_pool_lock);
186 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530187 ol_txrx_info("\n");
188 ol_txrx_info(
Nirav Shahda008342016-05-17 18:50:40 +0530189 "Flow_pool_id %d :: status %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800190 tmp_pool.flow_pool_id, tmp_pool.status);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530191 ol_txrx_info(
Nirav Shahda008342016-05-17 18:50:40 +0530192 "Total %d :: Available %d :: Deficient %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800193 tmp_pool.flow_pool_size, tmp_pool.avail_desc,
194 tmp_pool.deficient_desc);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530195 ol_txrx_info(
Nirav Shahda008342016-05-17 18:50:40 +0530196 "Start threshold %d :: Stop threshold %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800197 tmp_pool.start_th, tmp_pool.stop_th);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530198 ol_txrx_info(
Nirav Shahda008342016-05-17 18:50:40 +0530199 "Member flow_id %d :: flow_type %d",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800200 tmp_pool.member_flow_id, tmp_pool.flow_type);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530201 ol_txrx_info(
Nirav Shahda008342016-05-17 18:50:40 +0530202 "Pkt dropped due to unavailablity of descriptors %d",
203 tmp_pool.pkt_drop_no_desc);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530204 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800205 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530206 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800207
208 return;
209}
210
211/**
212 * ol_tx_clear_flow_pool_stats() - clear flow pool statistics
213 *
214 * Return: none
215 */
216void ol_tx_clear_flow_pool_stats(void)
217{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530218 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800219
220 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530221 ol_txrx_err("%s: pdev is null\n",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800222 __func__);
223 return;
224 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530225 qdf_mem_zero(&pdev->pool_stats, sizeof(pdev->pool_stats));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800226}
227
228/**
229 * ol_tx_move_desc_n() - Move n descriptors from src_pool to dst_pool.
230 * @src_pool: source pool
231 * @dst_pool: destination pool
232 * @desc_move_count: descriptor move count
233 *
234 * Return: actual descriptors moved
235 */
236static int ol_tx_move_desc_n(struct ol_tx_flow_pool_t *src_pool,
237 struct ol_tx_flow_pool_t *dst_pool,
238 int desc_move_count)
239{
240 uint16_t count = 0, i;
241 struct ol_tx_desc_t *tx_desc;
242 union ol_tx_desc_list_elem_t *temp_list = NULL;
243
244 /* Take descriptors from source pool and put it in temp_list */
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530245 qdf_spin_lock_bh(&src_pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800246 for (i = 0; i < desc_move_count; i++) {
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700247 tx_desc = ol_tx_get_desc_flow_pool(src_pool);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800248 ((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
249 temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
250
251 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530252 qdf_spin_unlock_bh(&src_pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800253
254 /* Take descriptors from temp_list and put it in destination pool */
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530255 qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800256 for (i = 0; i < desc_move_count; i++) {
257 if (dst_pool->deficient_desc)
258 dst_pool->deficient_desc--;
259 else
260 break;
261 tx_desc = &temp_list->tx_desc;
262 temp_list = temp_list->next;
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700263 ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800264 count++;
265 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530266 qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800267
268 /* If anything is there in temp_list put it back to source pool */
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530269 qdf_spin_lock_bh(&src_pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800270 while (temp_list) {
271 tx_desc = &temp_list->tx_desc;
272 temp_list = temp_list->next;
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700273 ol_tx_put_desc_flow_pool(src_pool, tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800274 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530275 qdf_spin_unlock_bh(&src_pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800276
277 return count;
278}
279
280
281/**
282 * ol_tx_distribute_descs_to_deficient_pools() - Distribute descriptors
283 * @src_pool: source pool
284 *
285 * Distribute all descriptors of source pool to all
286 * deficient pools as per flow_pool_list.
287 *
288 * Return: 0 for sucess
289 */
Jeff Johnsonf89f58f2016-10-14 09:58:29 -0700290static int
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800291ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
292{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530293 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800294 struct ol_tx_flow_pool_t *dst_pool = NULL;
295 uint16_t desc_count = src_pool->avail_desc;
296 uint16_t desc_move_count = 0;
297
298 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530299 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800300 "%s: pdev is NULL\n", __func__);
301 return -EINVAL;
302 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530303 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800304 TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
305 flow_pool_list_elem) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530306 qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800307 if (dst_pool->deficient_desc) {
308 desc_move_count =
309 (dst_pool->deficient_desc > desc_count) ?
310 desc_count : dst_pool->deficient_desc;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530311 qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800312 desc_move_count = ol_tx_move_desc_n(src_pool,
313 dst_pool, desc_move_count);
314 desc_count -= desc_move_count;
Himanshu Agarwald6f3c5a2017-03-30 13:54:17 +0530315
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530316 qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
Himanshu Agarwald6f3c5a2017-03-30 13:54:17 +0530317 if (dst_pool->status == FLOW_POOL_ACTIVE_PAUSED) {
318 if (dst_pool->avail_desc > dst_pool->start_th) {
319 pdev->pause_cb(dst_pool->member_flow_id,
320 WLAN_WAKE_ALL_NETIF_QUEUE,
321 WLAN_DATA_FLOW_CONTROL);
322 dst_pool->status =
323 FLOW_POOL_ACTIVE_UNPAUSED;
324 }
325 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800326 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530327 qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800328 if (desc_count == 0)
329 break;
330 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530331 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800332
333 return 0;
334}
335
336
337/**
338 * ol_tx_create_flow_pool() - create flow pool
339 * @flow_pool_id: flow pool id
340 * @flow_pool_size: flow pool size
341 *
342 * Return: flow_pool pointer / NULL for error
343 */
344struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
345 uint16_t flow_pool_size)
346{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530347 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800348 struct ol_tx_flow_pool_t *pool;
349 uint16_t size = 0, i;
350 struct ol_tx_desc_t *tx_desc;
351 union ol_tx_desc_list_elem_t *temp_list = NULL;
Poddar, Siddarth21e7bf02016-07-04 14:06:38 +0530352 uint32_t stop_threshold;
353 uint32_t start_threshold;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800354
355 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530356 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800357 "%s: pdev is NULL\n", __func__);
358 return NULL;
359 }
Poddar, Siddarth21e7bf02016-07-04 14:06:38 +0530360 stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
361 start_threshold = stop_threshold +
362 ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530363 pool = qdf_mem_malloc(sizeof(*pool));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800364 if (!pool) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530365 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800366 "%s: malloc failed\n", __func__);
367 return NULL;
368 }
369
370 pool->flow_pool_id = flow_pool_id;
371 pool->flow_pool_size = flow_pool_size;
372 pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
373 pool->start_th = (start_threshold * flow_pool_size)/100;
374 pool->stop_th = (stop_threshold * flow_pool_size)/100;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530375 qdf_spinlock_create(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800376
377 /* Take TX descriptor from global_pool and put it in temp_list*/
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530378 qdf_spin_lock_bh(&pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800379 if (pdev->tx_desc.num_free >= pool->flow_pool_size)
380 size = pool->flow_pool_size;
381 else
382 size = pdev->tx_desc.num_free;
383
384 for (i = 0; i < size; i++) {
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700385 tx_desc = ol_tx_get_desc_global_pool(pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800386 tx_desc->pool = pool;
387 ((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
388 temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
389
390 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530391 qdf_spin_unlock_bh(&pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800392
393 /* put temp_list to flow_pool */
394 pool->freelist = temp_list;
395 pool->avail_desc = size;
396 pool->deficient_desc = pool->flow_pool_size - pool->avail_desc;
397
398 /* Add flow_pool to flow_pool_list */
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530399 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800400 TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
401 flow_pool_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530402 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800403
404 return pool;
405}
406
407/**
408 * ol_tx_delete_flow_pool() - delete flow pool
409 * @pool: flow pool pointer
Nirav Shah7a0a9052016-04-14 16:52:21 +0530410 * @force: free pool forcefully
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800411 *
412 * Delete flow_pool if all tx descriptors are available.
413 * Otherwise put it in FLOW_POOL_INVALID state.
Nirav Shah7a0a9052016-04-14 16:52:21 +0530414 * If force is set then pull all available descriptors to
415 * global pool.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800416 *
417 * Return: 0 for success or error
418 */
Nirav Shah7a0a9052016-04-14 16:52:21 +0530419int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool, bool force)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800420{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530421 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800422 uint16_t i, size;
423 union ol_tx_desc_list_elem_t *temp_list = NULL;
424 struct ol_tx_desc_t *tx_desc = NULL;
425
426 if (!pool) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530427 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800428 "%s: pool is NULL\n", __func__);
Himanshu Agarwalf03f8102016-09-08 18:54:47 +0530429 QDF_ASSERT(0);
430 return -ENOMEM;
431 }
432 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530433 ol_txrx_err(
Himanshu Agarwalf03f8102016-09-08 18:54:47 +0530434 "%s: pdev is NULL\n", __func__);
435 QDF_ASSERT(0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800436 return -ENOMEM;
437 }
438
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530439 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800440 TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool, flow_pool_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530441 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800442
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530443 qdf_spin_lock_bh(&pool->flow_pool_lock);
Nirav Shah7a0a9052016-04-14 16:52:21 +0530444 if (pool->avail_desc == pool->flow_pool_size || force == true)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800445 pool->status = FLOW_POOL_INACTIVE;
446 else
447 pool->status = FLOW_POOL_INVALID;
448
449 /* Take all free descriptors and put it in temp_list */
450 temp_list = pool->freelist;
451 size = pool->avail_desc;
452 pool->freelist = NULL;
453 pool->avail_desc = 0;
454
455 if (pool->status == FLOW_POOL_INACTIVE) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530456 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800457 /* Free flow_pool */
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530458 qdf_spinlock_destroy(&pool->flow_pool_lock);
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530459 qdf_mem_free(pool);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800460 } else { /* FLOW_POOL_INVALID case*/
461 pool->flow_pool_size -= size;
462 pool->flow_pool_id = INVALID_FLOW_ID;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530463 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800464
465 pdev->tx_desc.num_invalid_bin++;
Poddar, Siddarth14521792017-03-14 21:19:42 +0530466 ol_txrx_info(
Nirav Shah2ae038d2015-12-23 20:36:11 +0530467 "%s: invalid pool created %d\n",
468 __func__, pdev->tx_desc.num_invalid_bin);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800469 if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
470 ASSERT(0);
471
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530472 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800473 TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
474 flow_pool_list_elem);
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530475 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800476 }
477
478 /* put free descriptors to global pool */
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530479 qdf_spin_lock_bh(&pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800480 for (i = 0; i < size; i++) {
481 tx_desc = &temp_list->tx_desc;
482 temp_list = temp_list->next;
483
Nirav Shah9d7f2e82015-09-28 11:09:09 -0700484 ol_tx_put_desc_global_pool(pdev, tx_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800485 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530486 qdf_spin_unlock_bh(&pdev->tx_mutex);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800487
488 return 0;
489}
490
491
492/**
493 * ol_tx_free_invalid_flow_pool() - free invalid pool
494 * @pool: pool
495 *
496 * Return: 0 for success or failure
497 */
498int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
499{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530500 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800501
502 if ((!pdev) || (!pool) || (pool->status != FLOW_POOL_INVALID)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530503 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800504 "%s: Invalid pool/pdev\n", __func__);
505 return -EINVAL;
506 }
507
508 /* direclty distribute to other deficient pools */
509 ol_tx_distribute_descs_to_deficient_pools(pool);
510
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530511 qdf_spin_lock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800512 pool->flow_pool_size = pool->avail_desc;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530513 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800514
515 pdev->tx_desc.num_invalid_bin--;
Poddar, Siddarth14521792017-03-14 21:19:42 +0530516 ol_txrx_info(
Nirav Shah2ae038d2015-12-23 20:36:11 +0530517 "%s: invalid pool deleted %d\n",
518 __func__, pdev->tx_desc.num_invalid_bin);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800519
Nirav Shah7a0a9052016-04-14 16:52:21 +0530520 return ol_tx_delete_flow_pool(pool, false);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800521}
522
523/**
524 * ol_tx_get_flow_pool() - get flow_pool from flow_pool_id
525 * @flow_pool_id: flow pool id
526 *
527 * Return: flow_pool ptr / NULL if not found
528 */
Jeff Johnsonf89f58f2016-10-14 09:58:29 -0700529static struct ol_tx_flow_pool_t *ol_tx_get_flow_pool(uint8_t flow_pool_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800530{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530531 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800532 struct ol_tx_flow_pool_t *pool = NULL;
533 bool is_found = false;
534
Himanshu Agarwalf03f8102016-09-08 18:54:47 +0530535 if (!pdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530536 ol_txrx_err("ERROR: pdev NULL");
Himanshu Agarwalf03f8102016-09-08 18:54:47 +0530537 QDF_ASSERT(0); /* traceback */
538 return NULL;
539 }
540
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530541 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800542 TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
543 flow_pool_list_elem) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530544 qdf_spin_lock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800545 if (pool->flow_pool_id == flow_pool_id) {
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530546 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800547 is_found = true;
548 break;
549 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530550 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800551 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530552 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800553
554 if (is_found == false)
555 pool = NULL;
556
557 return pool;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800558}
559
560
561/**
562 * ol_tx_flow_pool_vdev_map() - Map flow_pool with vdev
563 * @pool: flow_pool
564 * @vdev_id: flow_id /vdev_id
565 *
566 * Return: none
567 */
Jeff Johnsonf89f58f2016-10-14 09:58:29 -0700568static void ol_tx_flow_pool_vdev_map(struct ol_tx_flow_pool_t *pool,
569 uint8_t vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800570{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800571 struct ol_txrx_vdev_t *vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800572
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800573 vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800574 if (!vdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530575 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800576 "%s: invalid vdev_id %d\n",
577 __func__, vdev_id);
578 return;
579 }
580
581 vdev->pool = pool;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530582 qdf_spin_lock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800583 pool->member_flow_id = vdev_id;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530584 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800585
586 return;
587}
588
589/**
590 * ol_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
591 * @pool: flow_pool
592 * @vdev_id: flow_id /vdev_id
593 *
594 * Return: none
595 */
Jeff Johnsonf89f58f2016-10-14 09:58:29 -0700596static void ol_tx_flow_pool_vdev_unmap(struct ol_tx_flow_pool_t *pool,
597 uint8_t vdev_id)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800598{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800599 struct ol_txrx_vdev_t *vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800600
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800601 vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800602 if (!vdev) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530603 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800604 "%s: invalid vdev_id %d\n",
605 __func__, vdev_id);
606 return;
607 }
608
609 vdev->pool = NULL;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530610 qdf_spin_lock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800611 pool->member_flow_id = INVALID_FLOW_ID;
Anurag Chouhana37b5b72016-02-21 14:53:42 +0530612 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800613
614 return;
615}
616
617/**
618 * ol_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
619 * @flow_id: flow id
620 * @flow_type: flow type
621 * @flow_pool_id: pool id
622 * @flow_pool_size: pool size
623 *
624 * Process below target to host message
625 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
626 *
627 * Return: none
628 */
629void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
630 uint8_t flow_pool_id, uint16_t flow_pool_size)
631{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530632 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800633 struct ol_tx_flow_pool_t *pool;
634 uint8_t pool_create = 0;
635 enum htt_flow_type type = flow_type;
636
Poddar, Siddarth14521792017-03-14 21:19:42 +0530637 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800638 "%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
639 __func__, flow_id, flow_type, flow_pool_id, flow_pool_size);
640
Anurag Chouhanc5548422016-02-24 18:33:27 +0530641 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530642 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800643 "%s: pdev is NULL", __func__);
644 return;
645 }
646 pdev->pool_stats.pool_map_count++;
647
648 pool = ol_tx_get_flow_pool(flow_pool_id);
649 if (!pool) {
650 pool = ol_tx_create_flow_pool(flow_pool_id, flow_pool_size);
651 if (pool == NULL) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530652 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800653 "%s: creation of flow_pool %d size %d failed\n",
654 __func__, flow_pool_id, flow_pool_size);
655 return;
656 }
657 pool_create = 1;
658 }
659
660 switch (type) {
661
662 case FLOW_TYPE_VDEV:
663 ol_tx_flow_pool_vdev_map(pool, flow_id);
Himanshu Agarwald6f3c5a2017-03-30 13:54:17 +0530664 qdf_spin_lock_bh(&pool->flow_pool_lock);
665 pdev->pause_cb(flow_id,
666 WLAN_WAKE_ALL_NETIF_QUEUE,
667 WLAN_DATA_FLOW_CONTROL);
668 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800669 break;
670 default:
671 if (pool_create)
Nirav Shah7a0a9052016-04-14 16:52:21 +0530672 ol_tx_delete_flow_pool(pool, false);
Poddar, Siddarth14521792017-03-14 21:19:42 +0530673 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800674 "%s: flow type %d not supported !!!\n",
675 __func__, type);
676 break;
677 }
678
679 return;
680}
681
682/**
683 * ol_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
684 * @flow_id: flow id
685 * @flow_type: flow type
686 * @flow_pool_id: pool id
687 *
688 * Process below target to host message
689 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
690 *
691 * Return: none
692 */
693void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
694 uint8_t flow_pool_id)
695{
Anurag Chouhan6d760662016-02-20 16:05:43 +0530696 struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800697 struct ol_tx_flow_pool_t *pool;
698 enum htt_flow_type type = flow_type;
699
Poddar, Siddarth14521792017-03-14 21:19:42 +0530700 ol_txrx_dbg(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800701 "%s: flow_id %d flow_type %d flow_pool_id %d\n",
702 __func__, flow_id, flow_type, flow_pool_id);
703
Anurag Chouhanc5548422016-02-24 18:33:27 +0530704 if (qdf_unlikely(!pdev)) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530705 ol_txrx_err(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800706 "%s: pdev is NULL", __func__);
707 return;
708 }
709 pdev->pool_stats.pool_unmap_count++;
710
711 pool = ol_tx_get_flow_pool(flow_pool_id);
712 if (!pool) {
Poddar, Siddarth14521792017-03-14 21:19:42 +0530713 ol_txrx_info(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800714 "%s: flow_pool not available flow_pool_id %d\n",
715 __func__, type);
716 return;
717 }
718
719 switch (type) {
720
721 case FLOW_TYPE_VDEV:
722 ol_tx_flow_pool_vdev_unmap(pool, flow_id);
723 break;
724 default:
Poddar, Siddarth14521792017-03-14 21:19:42 +0530725 ol_txrx_info(
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800726 "%s: flow type %d not supported !!!\n",
727 __func__, type);
728 return;
729 }
730
731 /* only delete if all descriptors are available */
Nirav Shah7a0a9052016-04-14 16:52:21 +0530732 ol_tx_delete_flow_pool(pool, false);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800733
734 return;
735}
736
737