/*
 * Copyright (c) 2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <cdf_nbuf.h>           /* cdf_nbuf_t, etc. */
#include <cdf_atomic.h>         /* cdf_atomic_read, etc. */
#include <cdf_util.h>           /* cdf_unlikely */

/* APIs for other modules */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */
#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle */
#include <ol_txrx_ctrl_api.h>   /* ol_txrx_sync */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
#include <ol_txrx_types.h>      /* pdev stats */
#include <ol_tx_desc.h>         /* ol_tx_desc */
#include <ol_tx_send.h>         /* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>        /* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
#include <ol_tx.h>
#include <ol_cfg.h>
#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3

#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
#define TX_FLOW_MGMT_POOL_ID 0xEF
#define TX_FLOW_MGMT_POOL_SIZE 32

/**
 * ol_tx_register_global_mgmt_pool() - register global pool for mgmt packets
 * @pdev: pdev handle
 *
 * Return: none
 */
static void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	pdev->mgmt_pool = ol_tx_create_flow_pool(TX_FLOW_MGMT_POOL_ID,
						 TX_FLOW_MGMT_POOL_SIZE);
	if (!pdev->mgmt_pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Management pool creation failed\n");
	}
	return;
}

/**
 * ol_tx_deregister_global_mgmt_pool() - deregister global pool for mgmt packets
 * @pdev: pdev handle
 *
 * Return: none
 */
static void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	ol_tx_delete_flow_pool(pdev->mgmt_pool);
	return;
}
#else
static inline void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	return;
}
static inline void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	return;
}
#endif

/**
 * ol_tx_register_flow_control() - Register fw based tx flow control
 * @pdev: pdev handle
 *
 * Return: none
 */
void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
{
	cdf_spinlock_init(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_INIT(&pdev->tx_desc.flow_pool_list);

	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
		ol_tx_register_global_mgmt_pool(pdev);
}

/**
 * ol_tx_deregister_flow_control() - Deregister fw based tx flow control
 * @pdev: pdev handle
 *
 * Return: none
 */
void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
{
	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
		ol_tx_deregister_global_mgmt_pool(pdev);

	cdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
	if (!TAILQ_EMPTY(&pdev->tx_desc.flow_pool_list)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "flow pool list is not empty!!!\n");
	}
}

/**
 * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 *
 * Return: none
 */
void ol_tx_dump_flow_pool_info(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool = NULL;
	struct ol_tx_flow_pool_t tmp_pool;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev is null\n",
			   __func__);
		return;
	}

	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Global Pool\n");
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Total %d :: Available %d\n",
		   pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid flow_pool %d\n",
		   pdev->tx_desc.num_invalid_bin);

	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "No of pool map received %d\n",
		   pdev->pool_stats.pool_map_count);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "No of pool unmap received %d\n",
		   pdev->pool_stats.pool_unmap_count);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
		   "Pkt dropped due to unavailability of pool %d\n",
		   pdev->pool_stats.pkt_drop_no_pool);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
		   "Pkt dropped due to unavailability of descriptors %d\n",
		   pdev->pool_stats.pkt_drop_no_desc);

	/*
	 * Nested spin lock.
	 * Always take in below order:
	 * flow_pool_list_lock -> flow_pool_lock
	 */
	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		cdf_spin_lock_bh(&pool->flow_pool_lock);
		cdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		cdf_spin_unlock_bh(&pool->flow_pool_lock);
		cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Flow_pool_id %d :: status %d\n",
			   tmp_pool.flow_pool_id, tmp_pool.status);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Total %d :: Available %d :: Deficient %d\n",
			   tmp_pool.flow_pool_size, tmp_pool.avail_desc,
			   tmp_pool.deficient_desc);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Start_TH %d :: Stop_TH %d\n",
			   tmp_pool.start_th, tmp_pool.stop_th);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Member flow_id %d :: flow_type %d\n",
			   tmp_pool.member_flow_id, tmp_pool.flow_type);
		cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	}
	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return;
}
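
/*
 * Locking note (annotation): the dump loop above copies each pool into a
 * local tmp_pool and then drops both locks before printing, so the
 * potentially slow TXRX_PRINT calls never run with flow_pool_list_lock or
 * flow_pool_lock held. Any new code that needs both locks should follow the
 * same nesting order used throughout this file:
 *
 *	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 *	cdf_spin_lock_bh(&pool->flow_pool_lock);
 *	...
 *	cdf_spin_unlock_bh(&pool->flow_pool_lock);
 *	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
 */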

/**
 * ol_tx_clear_flow_pool_stats() - clear flow pool statistics
 *
 * Return: none
 */
void ol_tx_clear_flow_pool_stats(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev is null\n",
			   __func__);
		return;
	}
	cdf_mem_zero(&pdev->pool_stats, sizeof(pdev->pool_stats));
}

/**
 * ol_tx_move_desc_n() - Move n descriptors from src_pool to dst_pool.
 * @src_pool: source pool
 * @dst_pool: destination pool
 * @desc_move_count: descriptor move count
 *
 * Return: actual number of descriptors moved
 */
static int ol_tx_move_desc_n(struct ol_tx_flow_pool_t *src_pool,
			     struct ol_tx_flow_pool_t *dst_pool,
			     int desc_move_count)
{
	uint16_t count = 0, i;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *temp_list = NULL;

	/* Take descriptors from the source pool and put them on temp_list */
	cdf_spin_lock_bh(&src_pool->flow_pool_lock);
	for (i = 0; i < desc_move_count; i++) {
		tx_desc = ol_tx_get_desc_flow_pool(src_pool);
		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
	}
	cdf_spin_unlock_bh(&src_pool->flow_pool_lock);

	/* Move descriptors from temp_list to the destination pool */
	cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
	for (i = 0; i < desc_move_count; i++) {
		if (dst_pool->deficient_desc)
			dst_pool->deficient_desc--;
		else
			break;
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;
		ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
		count++;
	}
	cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);

	/* If anything is left on temp_list, put it back in the source pool */
	cdf_spin_lock_bh(&src_pool->flow_pool_lock);
	while (temp_list) {
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;
		ol_tx_put_desc_flow_pool(src_pool, tx_desc);
	}
	cdf_spin_unlock_bh(&src_pool->flow_pool_lock);

	return count;
}
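
/*
 * Worked example (illustrative, values are made up): if desc_move_count is 10
 * but dst_pool->deficient_desc is only 6 when the destination lock is taken,
 * ol_tx_move_desc_n() transfers 6 descriptors, returns the remaining 4 on
 * temp_list to src_pool, and reports a count of 6.
 */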

/**
 * ol_tx_distribute_descs_to_deficient_pools() - Distribute descriptors
 * @src_pool: source pool
 *
 * Distribute all descriptors of the source pool to all
 * deficient pools as per flow_pool_list.
 *
 * Return: 0 for success
 */
int
ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *dst_pool = NULL;
	uint16_t desc_count = src_pool->avail_desc;
	uint16_t desc_move_count = 0;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL\n", __func__);
		return -EINVAL;
	}
	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
		if (dst_pool->deficient_desc) {
			desc_move_count =
				(dst_pool->deficient_desc > desc_count) ?
				desc_count : dst_pool->deficient_desc;
			cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
			desc_move_count = ol_tx_move_desc_n(src_pool,
						dst_pool, desc_move_count);
			desc_count -= desc_move_count;
			cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
		}
		cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
		if (desc_count == 0)
			break;
	}
	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return 0;
}
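
/*
 * Note (annotation): within this file,
 * ol_tx_distribute_descs_to_deficient_pools() is invoked from
 * ol_tx_free_invalid_flow_pool() below to drain an invalidated pool's
 * remaining descriptors into whichever pools are still short of their
 * configured size.
 */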

/**
 * ol_tx_create_flow_pool() - create flow pool
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
						 uint16_t flow_pool_size)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	uint16_t size = 0, i;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *temp_list = NULL;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL\n", __func__);
		return NULL;
	}

	/* Read the configured thresholds only after pdev has been validated */
	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
	start_threshold = stop_threshold +
		ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);

	pool = cdf_mem_malloc(sizeof(*pool));
	if (!pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: malloc failed\n", __func__);
		return NULL;
	}

	pool->flow_pool_id = flow_pool_id;
	pool->flow_pool_size = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	pool->start_th = (start_threshold * flow_pool_size) / 100;
	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
	cdf_spinlock_init(&pool->flow_pool_lock);

	/* Take TX descriptors from the global pool and put them on temp_list */
	cdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.num_free >= pool->flow_pool_size)
		size = pool->flow_pool_size;
	else
		size = pdev->tx_desc.num_free;

	for (i = 0; i < size; i++) {
		tx_desc = ol_tx_get_desc_global_pool(pdev);
		tx_desc->pool = pool;
		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
	}
	cdf_spin_unlock_bh(&pdev->tx_mutex);

	/* Hand temp_list over to the flow_pool */
	pool->freelist = temp_list;
	pool->avail_desc = size;
	pool->deficient_desc = pool->flow_pool_size - pool->avail_desc;

	/* Add flow_pool to flow_pool_list */
	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
			  flow_pool_list_elem);
	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return pool;
}
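
/*
 * Threshold arithmetic (illustrative, with made-up config values): if the
 * configured stop threshold is 15 (percent) and the start-queue offset is 10,
 * start_threshold becomes 25. For flow_pool_size = 1120 this yields
 * stop_th = (15 * 1120) / 100 = 168 and start_th = (25 * 1120) / 100 = 280.
 * Presumably the flow is paused once fewer than stop_th descriptors remain
 * and resumed once start_th are available again; the pause/unpause logic
 * itself lives outside this file.
 */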

/**
 * ol_tx_delete_flow_pool() - delete flow pool
 * @pool: flow pool pointer
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 *
 * Return: 0 for success or error
 */
int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	uint16_t i, size;
	union ol_tx_desc_list_elem_t *temp_list = NULL;
	struct ol_tx_desc_t *tx_desc = NULL;

	if (!pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pool is NULL\n", __func__);
		return -ENOMEM;
	}

	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool, flow_pool_list_elem);
	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	cdf_spin_lock_bh(&pool->flow_pool_lock);
	if (pool->avail_desc == pool->flow_pool_size)
		pool->status = FLOW_POOL_INACTIVE;
	else
		pool->status = FLOW_POOL_INVALID;

	/* Take all free descriptors and put them on temp_list */
	temp_list = pool->freelist;
	size = pool->avail_desc;
	pool->freelist = NULL;
	pool->avail_desc = 0;

	if (pool->status == FLOW_POOL_INACTIVE) {
		cdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* Free flow_pool */
		cdf_spinlock_destroy(&pool->flow_pool_lock);
		cdf_mem_free(pool);
	} else { /* FLOW_POOL_INVALID case */
		pool->flow_pool_size -= size;
		pool->flow_pool_id = INVALID_FLOW_ID;
		cdf_spin_unlock_bh(&pool->flow_pool_lock);

		pdev->tx_desc.num_invalid_bin++;
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid pool created %d\n",
			   __func__, pdev->tx_desc.num_invalid_bin);
		if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
			ASSERT(0);

		cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
		TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
				  flow_pool_list_elem);
		cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
	}

	/* Return the free descriptors to the global pool */
	cdf_spin_lock_bh(&pdev->tx_mutex);
	for (i = 0; i < size; i++) {
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;

		ol_tx_put_desc_global_pool(pdev, tx_desc);
	}
	cdf_spin_unlock_bh(&pdev->tx_mutex);

	return 0;
}
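
/*
 * Lifecycle note (annotation): when a pool is deleted while some of its
 * descriptors are still outstanding, it stays on flow_pool_list in the
 * FLOW_POOL_INVALID state with flow_pool_id set to INVALID_FLOW_ID. It is
 * reclaimed later through ol_tx_free_invalid_flow_pool(), presumably once the
 * in-flight descriptors have completed and been returned.
 */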

/**
 * ol_tx_free_invalid_flow_pool() - free invalid pool
 * @pool: pool
 *
 * Return: 0 for success or failure
 */
int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);

	if ((!pdev) || (!pool) || (pool->status != FLOW_POOL_INVALID)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: Invalid pool/pdev\n", __func__);
		return -EINVAL;
	}

	/* directly distribute to other deficient pools */
	ol_tx_distribute_descs_to_deficient_pools(pool);

	cdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->flow_pool_size = pool->avail_desc;
	cdf_spin_unlock_bh(&pool->flow_pool_lock);

	pdev->tx_desc.num_invalid_bin--;
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
		   "%s: invalid pool deleted %d\n",
		   __func__, pdev->tx_desc.num_invalid_bin);

	return ol_tx_delete_flow_pool(pool);
}

/**
 * ol_tx_get_flow_pool() - get flow_pool from flow_pool_id
 * @flow_pool_id: flow pool id
 *
 * Return: flow_pool ptr / NULL if not found
 */
struct ol_tx_flow_pool_t *ol_tx_get_flow_pool(uint8_t flow_pool_id)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool = NULL;
	bool is_found = false;

	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		cdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->flow_pool_id == flow_pool_id) {
			cdf_spin_unlock_bh(&pool->flow_pool_lock);
			is_found = true;
			break;
		}
		cdf_spin_unlock_bh(&pool->flow_pool_lock);
	}
	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	if (is_found == false)
		pool = NULL;

	return pool;
}

/**
 * ol_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pool: flow_pool
 * @vdev_id: flow_id / vdev_id
 *
 * Return: none
 */
void ol_tx_flow_pool_vdev_map(struct ol_tx_flow_pool_t *pool,
			      uint8_t vdev_id)
{
	ol_txrx_vdev_handle vdev;

	vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
	if (!vdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid vdev_id %d\n",
			   __func__, vdev_id);
		return;
	}

	vdev->pool = pool;
	cdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->member_flow_id = vdev_id;
	cdf_spin_unlock_bh(&pool->flow_pool_lock);

	return;
}

/**
 * ol_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pool: flow_pool
 * @vdev_id: flow_id / vdev_id
 *
 * Return: none
 */
void ol_tx_flow_pool_vdev_unmap(struct ol_tx_flow_pool_t *pool,
				uint8_t vdev_id)
{
	ol_txrx_vdev_handle vdev;

	vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
	if (!vdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid vdev_id %d\n",
			   __func__, vdev_id);
		return;
	}

	vdev->pool = NULL;
	cdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->member_flow_id = INVALID_FLOW_ID;
	cdf_spin_unlock_bh(&pool->flow_pool_lock);

	return;
}

/**
 * ol_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process the target-to-host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP.
 *
 * Return: none
 */
void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
				 uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	uint8_t pool_create = 0;
	enum htt_flow_type type = flow_type;

	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
		   "%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
		   __func__, flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (cdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL", __func__);
		return;
	}
	pdev->pool_stats.pool_map_count++;

	pool = ol_tx_get_flow_pool(flow_pool_id);
	if (!pool) {
		pool = ol_tx_create_flow_pool(flow_pool_id, flow_pool_size);
		if (pool == NULL) {
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "%s: creation of flow_pool %d size %d failed\n",
				   __func__, flow_pool_id, flow_pool_size);
			return;
		}
		pool_create = 1;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		ol_tx_flow_pool_vdev_map(pool, flow_id);
		break;
	default:
		if (pool_create)
			ol_tx_delete_flow_pool(pool);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: flow type %d not supported !!!\n",
			   __func__, type);
		break;
	}

	return;
}
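
/*
 * Illustrative call sequence (hypothetical values): on receiving
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP for vdev 0 with pool id 1 and 1120
 * descriptors, the HTT layer would end up invoking
 *
 *	ol_tx_flow_pool_map_handler(0, FLOW_TYPE_VDEV, 1, 1120);
 *
 * which creates the pool if it does not exist yet and binds it to vdev 0.
 * The matching FLOW_POOL_UNMAP message is handled by
 * ol_tx_flow_pool_unmap_handler() below.
 */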

/**
 * ol_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process the target-to-host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP.
 *
 * Return: none
 */
void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
				   uint8_t flow_pool_id)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	enum htt_flow_type type = flow_type;

	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
		   "%s: flow_id %d flow_type %d flow_pool_id %d\n",
		   __func__, flow_id, flow_type, flow_pool_id);

	if (cdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL", __func__);
		return;
	}
	pdev->pool_stats.pool_unmap_count++;

	pool = ol_tx_get_flow_pool(flow_pool_id);
	if (!pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: flow_pool not available flow_pool_id %d\n",
			   __func__, flow_pool_id);
		return;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		ol_tx_flow_pool_vdev_unmap(pool, flow_id);
		break;
	default:
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: flow type %d not supported !!!\n",
			   __func__, type);
		return;
	}

	/* only delete the pool if all of its descriptors are available */
	ol_tx_delete_flow_pool(pool);

	return;
}