blob: 1c8c828b8a412b53828a300d5ea80cb1a691e36f [file] [log] [blame]
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07001/*
Jeff Johnsona8edf332019-03-18 09:51:52 -07002 * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07003 *
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -07004 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -070019#include <cds_api.h>
20
21/* OS abstraction libraries */
22#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
23#include <qdf_atomic.h> /* qdf_atomic_read, etc. */
24#include <qdf_util.h> /* qdf_unlikely */
25#include "dp_types.h"
26#include "dp_tx_desc.h"
27
28#include <cdp_txrx_handle.h>
29#include "dp_internal.h"
30#define INVALID_FLOW_ID 0xFF
31#define MAX_INVALID_BIN 3
32
chenguof44ac202018-08-28 18:58:52 +080033#ifdef QCA_AC_BASED_FLOW_CONTROL
34/**
35 * dp_tx_initialize_threshold() - Threshold of flow Pool initialization
36 * @pool: flow_pool
37 * @stop_threshold: stop threshold of certian AC
38 * @start_threshold: start threshold of certian AC
39 * @flow_pool_size: flow pool size
40 *
41 * Return: none
42 */
43static inline void
44dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
45 uint32_t start_threshold,
46 uint32_t stop_threshold,
47 uint16_t flow_pool_size)
48{
49 /* BE_BK threshold is same as previous threahold */
50 pool->start_th[DP_TH_BE_BK] = (start_threshold
51 * flow_pool_size) / 100;
52 pool->stop_th[DP_TH_BE_BK] = (stop_threshold
53 * flow_pool_size) / 100;
54
55 /* Update VI threshold based on BE_BK threashold */
56 pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
57 * FL_TH_VI_PERCENTAGE) / 100;
58 pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
59 * FL_TH_VI_PERCENTAGE) / 100;
60
61 /* Update VO threshold based on BE_BK threashold */
62 pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
63 * FL_TH_VO_PERCENTAGE) / 100;
64 pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
65 * FL_TH_VO_PERCENTAGE) / 100;
66
67 /* Update High Priority threshold based on BE_BK threashold */
68 pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
69 * FL_TH_HI_PERCENTAGE) / 100;
70 pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
71 * FL_TH_HI_PERCENTAGE) / 100;
72
73 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
74 "%s: tx flow control threshold is set, pool size is %d",
75 __func__, flow_pool_size);
76}
77
78/**
79 * dp_tx_flow_pool_reattach() - Reattach flow_pool
80 * @pool: flow_pool
81 *
82 * Return: none
83 */
84static inline void
85dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
86{
87 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
88 "%s: flow pool already allocated, attached %d times",
89 __func__, pool->pool_create_cnt);
90
91 if (pool->avail_desc > pool->start_th[DP_TH_BE_BK])
92 pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
93 if (pool->avail_desc <= pool->start_th[DP_TH_BE_BK] &&
94 pool->avail_desc > pool->start_th[DP_TH_VI])
95 pool->status = FLOW_POOL_BE_BK_PAUSED;
96 else if (pool->avail_desc <= pool->start_th[DP_TH_VI] &&
97 pool->avail_desc > pool->start_th[DP_TH_VO])
98 pool->status = FLOW_POOL_VI_PAUSED;
99 else if (pool->avail_desc <= pool->start_th[DP_TH_VO] &&
100 pool->avail_desc > pool->start_th[DP_TH_HI])
101 pool->status = FLOW_POOL_VO_PAUSED;
102 else
103 pool->status = FLOW_POOL_ACTIVE_PAUSED;
104
105 pool->pool_create_cnt++;
106}
107
108/**
109 * dp_tx_flow_pool_dump_threshold() - Dump threshold of the flow_pool
110 * @pool: flow_pool
111 *
112 * Return: none
113 */
114static inline void
115dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
116{
117 int i;
118
119 for (i = 0; i < FL_TH_MAX; i++) {
120 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
121 "Level %d :: Start threshold %d :: Stop threshold %d",
122 i, pool->start_th[i], pool->stop_th[i]);
123 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
124 "Level %d :: Maximun pause time %lu ms",
125 i, pool->max_pause_time[i]);
126 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
127 "Level %d :: Latest pause timestamp %lu",
128 i, pool->latest_pause_time[i]);
129 }
130}
131
132#else
133static inline void
134dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
135 uint32_t start_threshold,
136 uint32_t stop_threshold,
137 uint16_t flow_pool_size)
138
139{
140 /* INI is in percentage so divide by 100 */
141 pool->start_th = (start_threshold * flow_pool_size) / 100;
142 pool->stop_th = (stop_threshold * flow_pool_size) / 100;
143}
144
145static inline void
146dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
147{
148 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
149 "%s: flow pool already allocated, attached %d times",
150 __func__, pool->pool_create_cnt);
151 if (pool->avail_desc > pool->start_th)
152 pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
153 else
154 pool->status = FLOW_POOL_ACTIVE_PAUSED;
155
156 pool->pool_create_cnt++;
157}
158
/**
 * dp_tx_flow_pool_dump_threshold() - Dump thresholds of the flow_pool
 * @pool: flow_pool
 *
 * Single-level (non QCA_AC_BASED_FLOW_CONTROL) variant: logs the one
 * start/stop threshold pair of the pool.
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Start threshold %d :: Stop threshold %d",
		  pool->start_th, pool->stop_th);
}
166
167#endif
168
/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 * @soc_hdl: cdp handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	struct dp_tx_desc_pool_s tmp_pool;
	int i;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool map received %d", pool_stats->pool_map_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool unmap received %d", pool_stats->pool_unmap_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Pkt dropped due to unavailablity of pool %d",
		  pool_stats->pkt_drop_no_pool);

	/*
	 * Nested spin lock.
	 * Always take in below order.
	 * flow_pool_array_lock -> flow_pool_lock
	 */
	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		/* skip pools considered not in use
		 * NOTE(review): relies on enum ordering — statuses greater
		 * than FLOW_POOL_INVALID are treated as inactive; confirm
		 * against the flow_pool_status enum definition.
		 */
		if (pool->status > FLOW_POOL_INVALID)
			continue;
		/* snapshot the pool so it can be logged with no locks held */
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* drop the array lock while tracing; re-acquired below before
		 * the next iteration to preserve the lock ordering above
		 */
		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Flow_pool_id %d :: status %d",
			  tmp_pool.flow_pool_id, tmp_pool.status);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Total %d :: Available %d",
			  tmp_pool.pool_size, tmp_pool.avail_desc);
		dp_tx_flow_pool_dump_threshold(&tmp_pool);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Member flow_id %d :: flow_type %d",
			  tmp_pool.flow_pool_id, tmp_pool.flow_type);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Pkt dropped due to unavailablity of descriptors %d",
			  tmp_pool.pkt_drop_no_desc);
		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	}
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}
224
225/**
226 * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
227 *
228 * @soc: Handle to struct dp_soc.
229 *
230 * Return: None
231 */
232void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
233{
234
235 if (!soc) {
236 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530237 "%s: soc is null", __func__);
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700238 return;
239 }
240 qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
241}
242
/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return NULL;
	}
	pool = &soc->tx_desc[flow_pool_id];
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	/* pool already exists: just reattach (bump count, refresh status)
	 * and hand the existing pool back to the caller
	 */
	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
		dp_tx_flow_pool_reattach(pool);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("cannot alloc desc, status=%d, create_cnt=%d",
		       pool->status, pool->pool_create_cnt);
		return pool;
	}

	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	/* thresholds come from INI as percentages of the pool size;
	 * start = stop + configured offset so resume happens above pause
	 */
	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
				   flow_pool_size);
	pool->pool_create_cnt++;

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}
293
294/**
295 * dp_tx_delete_flow_pool() - delete flow pool
296 * @soc: Handle to struct dp_soc
297 * @pool: flow pool pointer
298 * @force: free pool forcefully
299 *
300 * Delete flow_pool if all tx descriptors are available.
301 * Otherwise put it in FLOW_POOL_INVALID state.
302 * If force is set then pull all available descriptors to
303 * global pool.
304 *
305 * Return: 0 for success or error
306 */
307int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
308 bool force)
309{
310 if (!soc || !pool) {
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700311 dp_err("pool or soc is NULL");
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700312 QDF_ASSERT(0);
313 return ENOMEM;
314 }
315
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700316 dp_info("pool create_cnt=%d, avail_desc=%d, size=%d, status=%d",
317 pool->pool_create_cnt, pool->avail_desc,
318 pool->pool_size, pool->status);
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700319 qdf_spin_lock_bh(&pool->flow_pool_lock);
Manjunathappa Prakashe6aba4f2018-05-08 19:55:25 -0700320 if (!pool->pool_create_cnt) {
Manjunathappa Prakashe6aba4f2018-05-08 19:55:25 -0700321 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700322 dp_err("flow pool either not created or alread deleted");
Manjunathappa Prakashe6aba4f2018-05-08 19:55:25 -0700323 return -ENOENT;
324 }
325 pool->pool_create_cnt--;
326 if (pool->pool_create_cnt) {
Manjunathappa Prakashe6aba4f2018-05-08 19:55:25 -0700327 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700328 dp_err("pool is still attached, pending detach %d",
329 pool->pool_create_cnt);
Manjunathappa Prakashe6aba4f2018-05-08 19:55:25 -0700330 return -EAGAIN;
331 }
332
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700333 if (pool->avail_desc < pool->pool_size) {
334 pool->status = FLOW_POOL_INVALID;
335 qdf_spin_unlock_bh(&pool->flow_pool_lock);
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700336 dp_err("avail desc less than pool size");
Manjunathappa Prakashe6aba4f2018-05-08 19:55:25 -0700337 return -EAGAIN;
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700338 }
339
340 /* We have all the descriptors for the pool, we can delete the pool */
341 dp_tx_desc_pool_free(soc, pool->flow_pool_id);
342 qdf_spin_unlock_bh(&pool->flow_pool_lock);
343 return 0;
344}
345
346/**
347 * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
348 * @pdev: Handle to struct dp_pdev
349 * @pool: flow_pool
350 * @vdev_id: flow_id /vdev_id
351 *
352 * Return: none
353 */
354static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
355 struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
356{
357 struct dp_vdev *vdev;
358 struct dp_soc *soc = pdev->soc;
359
Rakesh Pillaidce01372019-06-28 19:11:23 +0530360 vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700361 if (!vdev) {
362 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530363 "%s: invalid vdev_id %d",
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700364 __func__, vdev_id);
365 return;
366 }
367
368 vdev->pool = pool;
369 qdf_spin_lock_bh(&pool->flow_pool_lock);
370 pool->pool_owner_ctx = soc;
371 pool->flow_pool_id = vdev_id;
372 qdf_spin_unlock_bh(&pool->flow_pool_lock);
373}
374
375/**
376 * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
377 * @pdev: Handle to struct dp_pdev
378 * @pool: flow_pool
379 * @vdev_id: flow_id /vdev_id
380 *
381 * Return: none
382 */
383static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
384 struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
385{
386 struct dp_vdev *vdev;
387 struct dp_soc *soc = pdev->soc;
388
Rakesh Pillaidce01372019-06-28 19:11:23 +0530389 vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700390 if (!vdev) {
391 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530392 "%s: invalid vdev_id %d",
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700393 __func__, vdev_id);
394 return;
395 }
396
397 vdev->pool = NULL;
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700398}
399
400/**
401 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
402 * @pdev: Handle to struct dp_pdev
403 * @flow_id: flow id
404 * @flow_type: flow type
405 * @flow_pool_id: pool id
406 * @flow_pool_size: pool size
407 *
408 * Process below target to host message
409 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
410 *
411 * Return: none
412 */
413QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
414 uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
415{
416 struct dp_soc *soc = pdev->soc;
417 struct dp_tx_desc_pool_s *pool;
418 enum htt_flow_type type = flow_type;
419
420
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700421 dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
422 flow_id, flow_type, flow_pool_id, flow_pool_size);
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700423
424 if (qdf_unlikely(!soc)) {
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700425 dp_err("soc is NULL");
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700426 return QDF_STATUS_E_FAULT;
427 }
428 soc->pool_stats.pool_map_count++;
429
430 pool = dp_tx_create_flow_pool(soc, flow_pool_id,
431 flow_pool_size);
Jeff Johnsona8edf332019-03-18 09:51:52 -0700432 if (!pool) {
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700433 dp_err("creation of flow_pool %d size %d failed",
434 flow_pool_id, flow_pool_size);
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700435 return QDF_STATUS_E_RESOURCES;
436 }
437
438 switch (type) {
439
440 case FLOW_TYPE_VDEV:
441 dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
442 break;
443 default:
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700444 dp_err("flow type %d not supported", type);
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700445 break;
446 }
447
448 return QDF_STATUS_SUCCESS;
449}
450
451/**
452 * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
453 * @pdev: Handle to struct dp_pdev
454 * @flow_id: flow id
455 * @flow_type: flow type
456 * @flow_pool_id: pool id
457 *
458 * Process below target to host message
459 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
460 *
461 * Return: none
462 */
463void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
464 uint8_t flow_type, uint8_t flow_pool_id)
465{
466 struct dp_soc *soc = pdev->soc;
467 struct dp_tx_desc_pool_s *pool;
468 enum htt_flow_type type = flow_type;
469
470 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
Aditya Sathishded018e2018-07-02 16:25:21 +0530471 "%s: flow_id %d flow_type %d flow_pool_id %d",
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700472 __func__, flow_id, flow_type, flow_pool_id);
473
474 if (qdf_unlikely(!pdev)) {
475 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
476 "%s: pdev is NULL", __func__);
477 return;
478 }
479 soc->pool_stats.pool_unmap_count++;
480
481 pool = &soc->tx_desc[flow_pool_id];
482 if (!pool) {
483 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530484 "%s: flow_pool not available flow_pool_id %d",
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700485 __func__, type);
486 return;
487 }
488
489 switch (type) {
490
491 case FLOW_TYPE_VDEV:
492 dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
493 break;
494 default:
495 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530496 "%s: flow type %d not supported !!!",
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700497 __func__, type);
498 return;
499 }
500
501 /* only delete if all descriptors are available */
502 dp_tx_delete_flow_pool(soc, pool, false);
503}
504
/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Creates the spinlock that protects the soc's flow pool array.
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}
515
516/**
Sravan Kumar Kairam22075582019-08-24 01:26:57 +0530517 * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
518 * @tx_desc_pool: Handle to flow_pool
519 *
520 * Return: none
521 */
522static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
523{
524 struct dp_tx_desc_pool_s *tx_desc_pool;
525 int i;
526
527 for (i = 0; i < MAX_TXDESC_POOLS; i++) {
528 tx_desc_pool = &((soc)->tx_desc[i]);
529 if (!tx_desc_pool->desc_pages.num_pages)
530 continue;
531
532 if (dp_tx_desc_pool_free(soc, i) != QDF_STATUS_SUCCESS)
533 dp_err("Tx Desc Pool:%d Free failed", i);
534 }
535}
536
/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Frees any remaining tx descriptor pools, then destroys the
 * flow pool array spinlock created by dp_tx_flow_control_init().
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	dp_tx_desc_pool_dealloc(soc);

	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}
549
550/**
551 * dp_txrx_register_pause_cb() - Register pause callback
552 * @ctx: Handle to struct dp_soc
553 * @pause_cb: Tx pause_cb
554 *
555 * Return: none
556 */
557QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
558 tx_pause_callback pause_cb)
559{
560 struct dp_soc *soc = (struct dp_soc *)handle;
561
562 if (!soc || !pause_cb) {
563 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
564 FL("soc or pause_cb is NULL"));
565 return QDF_STATUS_E_INVAL;
566 }
567 soc->pause_cb = pause_cb;
568
569 return QDF_STATUS_SUCCESS;
570}
Manjunathappa Prakash38205cc2018-03-06 14:22:44 -0800571
Rakesh Pillaidce01372019-06-28 19:11:23 +0530572QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
573 uint8_t vdev_id)
Manjunathappa Prakash38205cc2018-03-06 14:22:44 -0800574{
Rakesh Pillaidce01372019-06-28 19:11:23 +0530575 struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
576 struct dp_pdev *pdev =
577 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
Manjunathappa Prakash38205cc2018-03-06 14:22:44 -0800578 int tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
579
Rakesh Pillaidce01372019-06-28 19:11:23 +0530580 if (!pdev) {
581 dp_err("pdev is NULL");
582 return QDF_STATUS_E_INVAL;
583 }
584
585 return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
586 vdev_id, tx_ring_size);
Manjunathappa Prakash38205cc2018-03-06 14:22:44 -0800587}
588
Rakesh Pillaidce01372019-06-28 19:11:23 +0530589void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
Manjunathappa Prakash38205cc2018-03-06 14:22:44 -0800590 uint8_t vdev_id)
591{
Rakesh Pillaidce01372019-06-28 19:11:23 +0530592 struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
593 struct dp_pdev *pdev =
594 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
595
596 if (!pdev) {
597 dp_err("pdev is NULL");
598 return;
599 }
600
601 return dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
602 FLOW_TYPE_VDEV, vdev_id);
Manjunathappa Prakash38205cc2018-03-06 14:22:44 -0800603}