/*
 * Copyright (c) 2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/* OS abstraction libraries */
#include <cdf_nbuf.h>		/* cdf_nbuf_t, etc. */
#include <cdf_atomic.h>		/* cdf_atomic_read, etc. */
#include <cdf_util.h>		/* cdf_unlikely */

/* APIs for other modules */
#include <htt.h>		/* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>	/* htt_tx_desc_tid */
#include <ol_txrx_api.h>	/* ol_txrx_vdev_handle */
#include <ol_txrx_ctrl_api.h>	/* ol_txrx_sync */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT1 */
#include <ol_txrx_types.h>	/* pdev stats */
#include <ol_tx_desc.h>		/* ol_tx_desc */
#include <ol_tx_send.h>		/* ol_tx_send */
#include <ol_txrx.h>

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>	/* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>	/* OL_TX_ENCAP, etc. */
#include <ol_tx.h>
#include <ol_cfg.h>

#define TX_FLOW_START_TH	25
#define TX_FLOW_STOP_TH		10
#define INVALID_FLOW_ID		0xFF
#define MAX_INVALID_BIN		3

#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
#define TX_FLOW_MGMT_POOL_ID	0xEF
#define TX_FLOW_MGMT_POOL_SIZE	32

/**
 * ol_tx_register_global_mgmt_pool() - register global pool for mgmt packets
 * @pdev: pdev handle
 *
 * Return: none
 */
static void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	pdev->mgmt_pool = ol_tx_create_flow_pool(TX_FLOW_MGMT_POOL_ID,
						 TX_FLOW_MGMT_POOL_SIZE);
	if (!pdev->mgmt_pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Management pool creation failed\n");
	}
}

/**
 * ol_tx_deregister_global_mgmt_pool() - deregister global pool for mgmt packets
 * @pdev: pdev handle
 *
 * Return: none
 */
static void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
	ol_tx_delete_flow_pool(pdev->mgmt_pool);
}
#else
static inline void
ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
}
static inline void
ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
{
}
#endif

/**
 * ol_tx_register_flow_control() - Register fw based tx flow control
 * @pdev: pdev handle
 *
 * Return: none
 */
void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
{
	cdf_spinlock_init(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_INIT(&pdev->tx_desc.flow_pool_list);

	ol_tx_register_global_mgmt_pool(pdev);
}

/**
 * ol_tx_deregister_flow_control() - Deregister fw based tx flow control
 * @pdev: pdev handle
 *
 * Return: none
 */
void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
{
	ol_tx_deregister_global_mgmt_pool(pdev);

	cdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
	if (!TAILQ_EMPTY(&pdev->tx_desc.flow_pool_list)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "flow pool list is not empty!!!\n");
	}
}

/**
 * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 *
 * Return: none
 */
void ol_tx_dump_flow_pool_info(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool = NULL;
	struct ol_tx_flow_pool_t tmp_pool;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL\n", __func__);
		return;
	}

	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Global Pool\n");
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Total %d :: Available %d\n",
		   pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid flow_pool %d\n",
		   pdev->tx_desc.num_invalid_bin);

	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "No of pool map received %d\n",
		   pdev->pool_stats.pool_map_count);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "No of pool unmap received %d\n",
		   pdev->pool_stats.pool_unmap_count);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
		   "Pkt dropped due to unavailability of pool %d\n",
		   pdev->pool_stats.pkt_drop_no_pool);
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
		   "Pkt dropped due to unavailability of descriptors %d\n",
		   pdev->pool_stats.pkt_drop_no_desc);

	/*
	 * Nested spin locks.
	 * Always take them in the order below:
	 * flow_pool_list_lock -> flow_pool_lock
	 */
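	/*
	 * Each pool is snapshotted into tmp_pool under its own lock, and
	 * both locks are dropped while printing, so the TXRX_PRINT calls
	 * never run with a spinlock held. The list lock is re-taken
	 * before advancing to the next pool.
	 */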
	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		cdf_spin_lock_bh(&pool->flow_pool_lock);
		cdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		cdf_spin_unlock_bh(&pool->flow_pool_lock);
		cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Flow_pool_id %d :: status %d\n",
			   tmp_pool.flow_pool_id, tmp_pool.status);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Total %d :: Available %d :: Deficient %d\n",
			   tmp_pool.flow_pool_size, tmp_pool.avail_desc,
			   tmp_pool.deficient_desc);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Start_TH %d :: Stop_TH %d\n",
			   tmp_pool.start_th, tmp_pool.stop_th);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Member flow_id %d :: flow_type %d\n",
			   tmp_pool.member_flow_id, tmp_pool.flow_type);
		cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	}
	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
}

/**
 * ol_tx_clear_flow_pool_stats() - clear flow pool statistics
 *
 * Return: none
 */
void ol_tx_clear_flow_pool_stats(void)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev is NULL\n",
			   __func__);
		return;
	}
	cdf_mem_zero(&pdev->pool_stats, sizeof(pdev->pool_stats));
}

/**
 * ol_tx_move_desc_n() - move n descriptors from src_pool to dst_pool
 * @src_pool: source pool
 * @dst_pool: destination pool
 * @desc_move_count: descriptor move count
 *
 * Return: actual number of descriptors moved
 */
static int ol_tx_move_desc_n(struct ol_tx_flow_pool_t *src_pool,
			     struct ol_tx_flow_pool_t *dst_pool,
			     int desc_move_count)
{
	uint16_t count = 0, i;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *temp_list = NULL;

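	/*
	 * Note: the caller must guarantee that src_pool holds at least
	 * desc_move_count free descriptors; the freelist below is popped
	 * without an emptiness check.
	 */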
	/* Take descriptors from the source pool and put them in temp_list */
	cdf_spin_lock_bh(&src_pool->flow_pool_lock);
	for (i = 0; i < desc_move_count; i++) {
		tx_desc = &src_pool->freelist->tx_desc;
		src_pool->freelist = src_pool->freelist->next;
		src_pool->avail_desc--;
		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
	}
	cdf_spin_unlock_bh(&src_pool->flow_pool_lock);

	/* Take descriptors from temp_list and put them in the destination pool */
	cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
	for (i = 0; i < desc_move_count; i++) {
		if (dst_pool->deficient_desc)
			dst_pool->deficient_desc--;
		else
			break;
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;
		tx_desc->pool = dst_pool;
		((union ol_tx_desc_list_elem_t *)tx_desc)->next =
			dst_pool->freelist;
		dst_pool->freelist = (union ol_tx_desc_list_elem_t *)tx_desc;
		dst_pool->avail_desc++;
		count++;
	}
	cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);

	/* If anything is left in temp_list, put it back in the source pool */
	cdf_spin_lock_bh(&src_pool->flow_pool_lock);
	while (temp_list) {
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;
		tx_desc->pool = src_pool;
		((union ol_tx_desc_list_elem_t *)tx_desc)->next =
			src_pool->freelist;
		src_pool->freelist = (union ol_tx_desc_list_elem_t *)tx_desc;
		src_pool->avail_desc++;
	}
	cdf_spin_unlock_bh(&src_pool->flow_pool_lock);

	return count;
}

/**
 * ol_tx_distribute_descs_to_deficient_pools() - distribute descriptors
 * @src_pool: source pool
 *
 * Distribute all descriptors of the source pool to all
 * deficient pools as per flow_pool_list.
 *
 * Return: 0 for success
 */
int
ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *dst_pool = NULL;
	uint16_t desc_count = src_pool->avail_desc;
	uint16_t desc_move_count = 0;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL\n", __func__);
		return -EINVAL;
	}
	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
		if (dst_pool->deficient_desc) {
			desc_move_count =
				(dst_pool->deficient_desc > desc_count) ?
				desc_count : dst_pool->deficient_desc;
			cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
			desc_move_count = ol_tx_move_desc_n(src_pool,
						dst_pool, desc_move_count);
			desc_count -= desc_move_count;
			cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
		}
		cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
		if (desc_count == 0)
			break;
	}
	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return 0;
}

/**
 * ol_tx_create_flow_pool() - create flow pool
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
						 uint16_t flow_pool_size)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	uint16_t size = 0, i;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *temp_list = NULL;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL\n", __func__);
		return NULL;
	}

	/* Read the thresholds from the pdev config only after the NULL check */
	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
	start_threshold = stop_threshold +
		ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);

	pool = cdf_mem_malloc(sizeof(*pool));
	if (!pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: malloc failed\n", __func__);
		return NULL;
	}

	pool->flow_pool_id = flow_pool_id;
	pool->flow_pool_size = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	/* start_threshold and stop_threshold are percentages of pool size */
	pool->start_th = (start_threshold * flow_pool_size) / 100;
	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
	cdf_spinlock_init(&pool->flow_pool_lock);

	/* Take TX descriptors from the global pool and put them in temp_list */
	cdf_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.num_free >= pool->flow_pool_size)
		size = pool->flow_pool_size;
	else
		size = pdev->tx_desc.num_free;

	for (i = 0; i < size; i++) {
		pdev->tx_desc.num_free--;
		tx_desc = &pdev->tx_desc.freelist->tx_desc;
		pdev->tx_desc.freelist = pdev->tx_desc.freelist->next;
		tx_desc->pool = pool;
		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
	}
	cdf_spin_unlock_bh(&pdev->tx_mutex);

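	/*
	 * The global pool may not have covered the whole request; the
	 * shortfall recorded below as deficient_desc is replenished later
	 * by ol_tx_distribute_descs_to_deficient_pools().
	 */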
	/* put temp_list into the flow_pool */
	pool->freelist = temp_list;
	pool->avail_desc = size;
	pool->deficient_desc = pool->flow_pool_size - pool->avail_desc;

	/* Add flow_pool to flow_pool_list */
	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
			  flow_pool_list_elem);
	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	return pool;
}

/**
 * ol_tx_delete_flow_pool() - delete flow pool
 * @pool: flow pool pointer
 *
 * Delete the flow_pool if all tx descriptors are available.
 * Otherwise put it in the FLOW_POOL_INVALID state.
 *
 * Return: 0 on success; error code on failure
 */
int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	uint16_t i, size;
	union ol_tx_desc_list_elem_t *temp_list = NULL;
	struct ol_tx_desc_t *tx_desc = NULL;

	if (!pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pool is NULL\n", __func__);
		return -ENOMEM;
	}

	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool, flow_pool_list_elem);
	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	cdf_spin_lock_bh(&pool->flow_pool_lock);
	if (pool->avail_desc == pool->flow_pool_size)
		pool->status = FLOW_POOL_INACTIVE;
	else
		pool->status = FLOW_POOL_INVALID;

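	/*
	 * FLOW_POOL_INACTIVE: every descriptor is back in the pool, so it
	 * can be freed right away. FLOW_POOL_INVALID: some descriptors are
	 * still in flight, so the pool stays on the list as an invalid bin
	 * until they are returned.
	 */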
	/* Take all free descriptors and put them in temp_list */
	temp_list = pool->freelist;
	size = pool->avail_desc;
	pool->freelist = NULL;
	pool->avail_desc = 0;

	if (pool->status == FLOW_POOL_INACTIVE) {
		cdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* Free flow_pool */
		cdf_spinlock_destroy(&pool->flow_pool_lock);
		cdf_mem_free(pool);
	} else { /* FLOW_POOL_INVALID case */
		pool->flow_pool_size -= size;
		pool->flow_pool_id = INVALID_FLOW_ID;
		cdf_spin_unlock_bh(&pool->flow_pool_lock);

		pdev->tx_desc.num_invalid_bin++;
		if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
			ASSERT(0);

		cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
		TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
				  flow_pool_list_elem);
		cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
	}

	/* put the free descriptors back in the global pool */
	cdf_spin_lock_bh(&pdev->tx_mutex);
	for (i = 0; i < size; i++) {
		tx_desc = &temp_list->tx_desc;
		temp_list = temp_list->next;

		((union ol_tx_desc_list_elem_t *)tx_desc)->next =
			pdev->tx_desc.freelist;
		pdev->tx_desc.freelist =
			(union ol_tx_desc_list_elem_t *)tx_desc;
		pdev->tx_desc.num_free++;
	}
	cdf_spin_unlock_bh(&pdev->tx_mutex);

	return 0;
}

/**
 * ol_tx_free_invalid_flow_pool() - free an invalid pool
 * @pool: pool
 *
 * Return: 0 on success; error code on failure
 */
int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);

	if ((!pdev) || (!pool) || (pool->status != FLOW_POOL_INVALID)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: Invalid pool/pdev\n", __func__);
		return -EINVAL;
	}

	/* directly distribute to other deficient pools */
	ol_tx_distribute_descs_to_deficient_pools(pool);

	cdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->flow_pool_size = pool->avail_desc;
	cdf_spin_unlock_bh(&pool->flow_pool_lock);

	pdev->tx_desc.num_invalid_bin--;

	return ol_tx_delete_flow_pool(pool);
}

/**
 * ol_tx_get_flow_pool() - get flow_pool from flow_pool_id
 * @flow_pool_id: flow pool id
 *
 * Return: flow_pool ptr / NULL if not found
 */
struct ol_tx_flow_pool_t *ol_tx_get_flow_pool(uint8_t flow_pool_id)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool = NULL;
	bool is_found = false;

	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
		      flow_pool_list_elem) {
		cdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->flow_pool_id == flow_pool_id) {
			cdf_spin_unlock_bh(&pool->flow_pool_lock);
			is_found = true;
			break;
		}
		cdf_spin_unlock_bh(&pool->flow_pool_lock);
	}
	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);

	if (!is_found)
		pool = NULL;

	return pool;
}

/**
 * ol_tx_flow_pool_vdev_map() - map flow_pool with vdev
 * @pool: flow_pool
 * @vdev_id: flow_id/vdev_id
 *
 * Return: none
 */
void ol_tx_flow_pool_vdev_map(struct ol_tx_flow_pool_t *pool,
			      uint8_t vdev_id)
{
	ol_txrx_vdev_handle vdev;

	vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
	if (!vdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid vdev_id %d\n",
			   __func__, vdev_id);
		return;
	}

	vdev->pool = pool;
	cdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->member_flow_id = vdev_id;
	cdf_spin_unlock_bh(&pool->flow_pool_lock);
}

/**
 * ol_tx_flow_pool_vdev_unmap() - unmap flow_pool from vdev
 * @pool: flow_pool
 * @vdev_id: flow_id/vdev_id
 *
 * Return: none
 */
void ol_tx_flow_pool_vdev_unmap(struct ol_tx_flow_pool_t *pool,
				uint8_t vdev_id)
{
	ol_txrx_vdev_handle vdev;

	vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
	if (!vdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: invalid vdev_id %d\n",
			   __func__, vdev_id);
		return;
	}

	vdev->pool = NULL;
	cdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->member_flow_id = INVALID_FLOW_ID;
	cdf_spin_unlock_bh(&pool->flow_pool_lock);
}

/**
 * ol_tx_flow_pool_map_handler() - map flow_id with a pool of descriptors
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process the target-to-host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP.
 *
 * Return: none
 */
void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
				 uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	uint8_t pool_create = 0;
	enum htt_flow_type type = flow_type;

	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
		   "%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
		   __func__, flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (cdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL", __func__);
		return;
	}
	pdev->pool_stats.pool_map_count++;

	pool = ol_tx_get_flow_pool(flow_pool_id);
	if (!pool) {
		pool = ol_tx_create_flow_pool(flow_pool_id, flow_pool_size);
		if (pool == NULL) {
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "%s: creation of flow_pool %d size %d failed\n",
				   __func__, flow_pool_id, flow_pool_size);
			return;
		}
		pool_create = 1;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		ol_tx_flow_pool_vdev_map(pool, flow_id);
		break;
	default:
		/* Roll back the pool if it was created for an unsupported type */
		if (pool_create)
			ol_tx_delete_flow_pool(pool);
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: flow type %d not supported !!!\n",
			   __func__, type);
		break;
	}
}

/**
 * ol_tx_flow_pool_unmap_handler() - unmap flow_id from its pool of descriptors
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process the target-to-host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP.
 *
 * Return: none
 */
void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
				   uint8_t flow_pool_id)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	struct ol_tx_flow_pool_t *pool;
	enum htt_flow_type type = flow_type;

	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
		   "%s: flow_id %d flow_type %d flow_pool_id %d\n",
		   __func__, flow_id, flow_type, flow_pool_id);

	if (cdf_unlikely(!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: pdev is NULL", __func__);
		return;
	}
	pdev->pool_stats.pool_unmap_count++;

	pool = ol_tx_get_flow_pool(flow_pool_id);
	if (!pool) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: flow_pool not available flow_pool_id %d\n",
			   __func__, flow_pool_id);
		return;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		ol_tx_flow_pool_vdev_unmap(pool, flow_id);
		break;
	default:
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: flow type %d not supported !!!\n",
			   __func__, type);
		return;
	}

	/* only delete if all descriptors are available */
	ol_tx_delete_flow_pool(pool);
}