blob: dc3e34eb60a627441b288794cf024a4cdd8bdb0f [file] [log] [blame]
Leo Changc2a7b762016-09-26 13:15:41 -07001/*
phadimana1f79822019-02-15 15:02:37 +05302 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
Leo Changc2a7b762016-09-26 13:15:41 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
Balamurugan Mahalingamf72cb1f2018-06-25 12:18:34 +053019#include "hal_hw_headers.h"
Leo Changc2a7b762016-09-26 13:15:41 -070020#include "dp_types.h"
21#include "dp_tx_desc.h"
22
#ifndef DESC_PARTITION
/* Round the per-descriptor size up to a power of two so that page
 * offsets can be derived with shifts and masks instead of division.
 */
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)

/**
 * DP_TX_DESC_PAGE_DIVIDER() - pre-compute page-lookup helpers for a pool
 * @soc: DP SoC handle
 * @num_desc_per_page: descriptors per page (expected to be a power of two)
 * @pool_id: Tx descriptor pool index
 *
 * Stores an offset mask (num_desc_per_page - 1) and a page-divider shift
 * (log2 of num_desc_per_page) in the pool so a descriptor index can be
 * split into (page, offset) cheaply.
 *
 * The argument is copied into a local before the bit-length loop so the
 * caller's variable is no longer destroyed by macro expansion, and all
 * arguments are parenthesized against operator-precedence surprises.
 */
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do { \
	uint32_t num_desc_tmp = (num_desc_per_page); \
	uint8_t sig_bit = 0; \
	(soc)->tx_desc[(pool_id)].offset_filter = (num_desc_per_page) - 1; \
	/* Calculate page divider to find page number */ \
	while (num_desc_tmp) { \
		sig_bit++; \
		num_desc_tmp = num_desc_tmp >> 1; \
	} \
	(soc)->tx_desc[(pool_id)].page_divider = (sig_bit - 1); \
} while (0)
#else
/* With DESC_PARTITION the element size is used as-is and no page
 * divider is required.
 */
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
41
42/**
Soumya Bhatdbb85302018-05-18 11:01:34 +053043 * dp_tx_desc_pool_counter_initialize() - Initialize counters
44 * @tx_desc_pool Handle to DP tx_desc_pool structure
45 * @num_elem Number of descriptor elements per pool
46 *
47 * Return: None
48 */
49#ifdef QCA_LL_TX_FLOW_CONTROL_V2
50static void
51dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
52 uint16_t num_elem)
53{
54}
55#else
56static void
57dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
58 uint16_t num_elem)
59{
60 tx_desc_pool->num_free = num_elem;
61 tx_desc_pool->num_allocated = 0;
62}
63#endif
64
65/**
Leo Changc2a7b762016-09-26 13:15:41 -070066 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
67 * @soc Handle to DP SoC structure
68 * @num_pool Number of pools to allocate
69 * @num_elem Number of descriptor elements per pool
70 *
71 * This function allocates memory for SW tx descriptors
72 * (used within host for tx data path).
73 * The number of tx descriptors required will be large
74 * since based on number of clients (1024 clients x 3 radios),
75 * outstanding MSDUs stored in TQM queues and LMAC queues will be significantly
76 * large.
77 *
78 * To avoid allocating a large contiguous memory, it uses multi_page_alloc qdf
79 * function to allocate memory
80 * in multiple pages. It then iterates through the memory allocated across pages
81 * and links each descriptor
82 * to next descriptor, taking care of page boundaries.
83 *
84 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are allocated,
85 * one for each ring;
86 * This minimizes lock contention when hard_start_xmit is called
87 * from multiple CPUs.
88 * Alternately, multiple pools can be used for multiple VDEVs for VDEV level
89 * flow control.
90 *
91 * Return: Status code. 0 for success.
92 */
93QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
94 uint16_t num_elem)
95{
96 uint32_t id, count, page_id, offset, pool_id_32;
Varun Reddy Yeturua7c21dc2019-05-16 14:03:46 -070097 uint16_t num_desc_per_page;
Leo Changc2a7b762016-09-26 13:15:41 -070098 struct dp_tx_desc_s *tx_desc_elem;
99 uint32_t desc_size;
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700100 struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
Leo Changc2a7b762016-09-26 13:15:41 -0700101
102 desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700103 tx_desc_pool->elem_size = desc_size;
phadimana1f79822019-02-15 15:02:37 +0530104 if (!dp_is_soc_reinit(soc))
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530105 qdf_mem_multi_pages_alloc(soc->osdev,
106 &tx_desc_pool->desc_pages,
107 desc_size, num_elem,
108 0, true);
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700109 if (!tx_desc_pool->desc_pages.num_pages) {
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700110 dp_err("Multi page alloc fail, tx desc");
Leo Changc2a7b762016-09-26 13:15:41 -0700111 goto fail_exit;
112 }
113
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700114
Leo Changc2a7b762016-09-26 13:15:41 -0700115 num_desc_per_page =
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700116 tx_desc_pool->desc_pages.num_element_per_page;
117 tx_desc_pool->freelist = (struct dp_tx_desc_s *)
118 *tx_desc_pool->desc_pages.cacheable_pages;
Leo Changc2a7b762016-09-26 13:15:41 -0700119 if (qdf_mem_multi_page_link(soc->osdev,
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530120 &tx_desc_pool->desc_pages,
121 desc_size, num_elem, true)) {
Varun Reddy Yeturu83a31a32019-06-06 15:37:21 -0700122 dp_err("invalid tx desc allocation - overflow num link");
Leo Changc2a7b762016-09-26 13:15:41 -0700123 goto free_tx_desc;
124 }
125
126 /* Set unique IDs for each Tx descriptor */
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700127 tx_desc_elem = tx_desc_pool->freelist;
Leo Changc2a7b762016-09-26 13:15:41 -0700128 count = 0;
129 pool_id_32 = (uint32_t)pool_id;
130 while (tx_desc_elem) {
131 page_id = count / num_desc_per_page;
132 offset = count % num_desc_per_page;
133 id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
134 (page_id << DP_TX_DESC_ID_PAGE_OS) | offset);
135
136 tx_desc_elem->id = id;
137 tx_desc_elem->pool_id = pool_id;
138 tx_desc_elem = tx_desc_elem->next;
139 count++;
140 }
141
Soumya Bhatdbb85302018-05-18 11:01:34 +0530142 dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700143 TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
Leo Changc2a7b762016-09-26 13:15:41 -0700144 return QDF_STATUS_SUCCESS;
145
146free_tx_desc:
147 qdf_mem_multi_pages_free(soc->osdev,
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700148 &tx_desc_pool->desc_pages, 0, true);
Leo Changc2a7b762016-09-26 13:15:41 -0700149
150fail_exit:
151 return QDF_STATUS_E_FAULT;
152}
153
154/**
155 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
156 *
157 * @soc Handle to DP SoC structure
158 * @pool_id
159 *
160 * Return:
161 */
162QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
163{
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700164 struct dp_tx_desc_pool_s *tx_desc_pool =
165 &((soc)->tx_desc[(pool_id)]);
166
Leo Changc2a7b762016-09-26 13:15:41 -0700167 qdf_mem_multi_pages_free(soc->osdev,
Manjunathappa Prakashced7ea62017-07-02 03:02:15 -0700168 &tx_desc_pool->desc_pages, 0, true);
169 TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
Jinwei Chen15da8a52018-05-22 16:12:10 +0800170 TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
Leo Changc2a7b762016-09-26 13:15:41 -0700171 return QDF_STATUS_SUCCESS;
172}
173
/**
 * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 * @num_elem: number of extension descriptor elements to allocate
 *
 * Two parallel multi-page allocations are made: a DMA-coherent region
 * holding the HW-visible extension descriptors, and a cacheable region
 * of SW link elements chained into a freelist. Each link element is
 * then pointed at its slice of the coherent region.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_NOMEM or
 *         QDF_STATUS_E_FAULT on failure (partial allocations are freed).
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;
	qdf_dma_context_t memctx = 0;

	/* Coherent (DMA) tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	memctx = qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx);
	/* On SoC re-init the pages persist from the previous attach */
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &soc->tx_ext_desc[pool_id].
					  desc_pages,
					  soc->tx_ext_desc[pool_id].elem_size,
					  soc->tx_ext_desc[pool_id].elem_count,
					  memctx, false);
	}
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	/* NOTE(review): num_page is assigned but never read below */
	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also large: a single element is 24 bytes,
	 * so 2K elements are 48 Kbytes — multi-page cacheable memory
	 * is required.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &soc->tx_ext_desc[pool_id].
					  desc_link_pages,
					  soc->tx_ext_desc[pool_id].
					  link_elem_size,
					  soc->tx_ext_desc[pool_id].
					  elem_count,
					  0, true);
	}
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* Chain the cacheable link elements into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Point each link element at its coherent-memory descriptor */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page: take the page's
			 * base address rather than offsetting from the
			 * previous element.
			 *
			 * NOTE(review): this guard tests the first
			 * page's start address (pages->dma_pages) every
			 * iteration, while page_info is the pointer
			 * that advances — confirm the intent; checking
			 * page_info->page_v_addr_start looks intended.
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "link over flow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			/* Subsequent elements: previous address + elem size */
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;

}
301
302/**
303 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
304 * @soc: Handle to DP SoC structure
305 * @pool_id: extension descriptor pool id
306 *
307 * Return: NONE
308 */
309QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
310{
311 qdf_mem_multi_pages_free(soc->osdev,
312 &soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);
313
314 qdf_mem_multi_pages_free(soc->osdev,
315 &soc->tx_ext_desc[pool_id].desc_pages,
316 qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
317 false);
318
Manjunathappa Prakashddf07402017-12-02 21:36:16 -0800319 qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
Leo Changc2a7b762016-09-26 13:15:41 -0700320 return QDF_STATUS_SUCCESS;
321}
322
Ishank Jain5122f8f2017-03-15 22:22:47 +0530323/**
324 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
325 * @soc: Handle to DP SoC structure
326 * @pool_id: tso descriptor pool id
327 * @num_elem: number of element
328 *
329 * Return: QDF_STATUS_SUCCESS
330 */
331#if defined(FEATURE_TSO)
332QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
333 uint16_t num_elem)
334{
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530335 struct dp_tx_tso_seg_pool_s *tso_desc_pool;
336 uint32_t desc_size;
Ishank Jain5122f8f2017-03-15 22:22:47 +0530337
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530338 tso_desc_pool = &soc->tx_tso_desc[pool_id];
339 tso_desc_pool->num_free = 0;
340 desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
phadimana1f79822019-02-15 15:02:37 +0530341 if (!dp_is_soc_reinit(soc))
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530342 qdf_mem_multi_pages_alloc(soc->osdev,
343 &tso_desc_pool->desc_pages,
344 desc_size,
345 num_elem, 0, true);
Ishank Jain5122f8f2017-03-15 22:22:47 +0530346
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530347 if (!tso_desc_pool->desc_pages.num_pages) {
Ishank Jain5122f8f2017-03-15 22:22:47 +0530348 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530349 FL("Alloc Failed %pK pool_id %d"),
350 soc, pool_id);
Ishank Jain5122f8f2017-03-15 22:22:47 +0530351 return QDF_STATUS_E_NOMEM;
352 }
353
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530354 tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
355 *tso_desc_pool->desc_pages.cacheable_pages;
356 tso_desc_pool->num_free = num_elem;
357 if (qdf_mem_multi_page_link(soc->osdev,
358 &tso_desc_pool->desc_pages,
359 desc_size,
360 num_elem, true)) {
361 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
362 "invalid tso desc allocation - overflow num link");
363 goto free_tso_desc;
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700364 }
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530365 TSO_DEBUG("Number of free descriptors: %u\n", tso_desc_pool->num_free);
366 tso_desc_pool->pool_size = num_elem;
367 qdf_spinlock_create(&tso_desc_pool->lock);
Ishank Jain5122f8f2017-03-15 22:22:47 +0530368
369 return QDF_STATUS_SUCCESS;
370
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530371free_tso_desc:
372 qdf_mem_multi_pages_free(soc->osdev,
373 &tso_desc_pool->desc_pages, 0, true);
Ishank Jain5122f8f2017-03-15 22:22:47 +0530374
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530375 return QDF_STATUS_E_FAULT;
Ishank Jain5122f8f2017-03-15 22:22:47 +0530376}
Ishank Jain5122f8f2017-03-15 22:22:47 +0530377
378/**
379 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
380 * @soc: Handle to DP SoC structure
381 * @pool_id: extension descriptor pool id
382 *
383 * Return: NONE
384 */
Ishank Jain5122f8f2017-03-15 22:22:47 +0530385void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
386{
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530387 struct dp_tx_tso_seg_pool_s *tso_desc_pool;
Ishank Jain5122f8f2017-03-15 22:22:47 +0530388
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530389 tso_desc_pool = &soc->tx_tso_desc[pool_id];
Ishank Jain5122f8f2017-03-15 22:22:47 +0530390
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530391 qdf_spin_lock_bh(&tso_desc_pool->lock);
Ishank Jain5122f8f2017-03-15 22:22:47 +0530392
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530393 qdf_mem_multi_pages_free(soc->osdev,
394 &tso_desc_pool->desc_pages, 0, true);
395 tso_desc_pool->freelist = NULL;
396 tso_desc_pool->num_free = 0;
397 tso_desc_pool->pool_size = 0;
398 qdf_spin_unlock_bh(&tso_desc_pool->lock);
399 qdf_spinlock_destroy(&tso_desc_pool->lock);
Ishank Jain5122f8f2017-03-15 22:22:47 +0530400 return;
401}
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700402/**
403 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that tracks the
404 * fragments in each tso segment
405 *
406 * @soc: handle to dp soc structure
407 * @pool_id: descriptor pool id
408 * @num_elem: total number of descriptors to be allocated
409 */
410QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
411 uint16_t num_elem)
412{
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530413 struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
414 uint32_t desc_size;
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700415
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530416 tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
417 tso_num_seg_pool->num_free = 0;
418 desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
phadimana1f79822019-02-15 15:02:37 +0530419 if (!dp_is_soc_reinit(soc))
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530420 qdf_mem_multi_pages_alloc(soc->osdev,
421 &tso_num_seg_pool->desc_pages,
422 desc_size,
423 num_elem, 0, true);
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530424 if (!tso_num_seg_pool->desc_pages.num_pages) {
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700425 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Anish Nataraje9d4c3b2018-11-24 22:24:56 +0530426 FL("Alloc Failed %pK pool_id %d"),
427 soc, pool_id);
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700428 return QDF_STATUS_E_NOMEM;
429 }
430
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530431 if (qdf_mem_multi_page_link(soc->osdev,
432 &tso_num_seg_pool->desc_pages,
433 desc_size,
434 num_elem, true)) {
435 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
436 "invalid tso desc allocation - overflow num link");
437 goto fail;
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700438 }
439
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530440 tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
441 *tso_num_seg_pool->desc_pages.cacheable_pages;
442 tso_num_seg_pool->num_free = num_elem;
443 tso_num_seg_pool->num_seg_pool_size = num_elem;
444
445 qdf_spinlock_create(&tso_num_seg_pool->lock);
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700446
447 return QDF_STATUS_SUCCESS;
448
449fail:
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530450 qdf_mem_multi_pages_free(soc->osdev,
451 &tso_num_seg_pool->desc_pages, 0, true);
452
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700453 return QDF_STATUS_E_NOMEM;
454}
455
456/**
457 * dp_tx_tso_num_seg_pool_free() - free pool of descriptors that tracks
458 * the fragments in tso segment
459 *
460 *
461 * @soc: handle to dp soc structure
462 * @pool_id: descriptor pool_id
463 */
464void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
465{
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530466 struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700467
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530468 tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
469 qdf_spin_lock_bh(&tso_num_seg_pool->lock);
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700470
Kiran Venkatappa08bf93b2018-08-06 22:08:36 +0530471 qdf_mem_multi_pages_free(soc->osdev,
472 &tso_num_seg_pool->desc_pages, 0, true);
473 tso_num_seg_pool->freelist = NULL;
474 tso_num_seg_pool->num_free = 0;
475 tso_num_seg_pool->num_seg_pool_size = 0;
476 qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
477 qdf_spinlock_destroy(&tso_num_seg_pool->lock);
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700478 return;
479}
480
Ishank Jain5122f8f2017-03-15 22:22:47 +0530481#else
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700482QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
483 uint16_t num_elem)
484{
485 return QDF_STATUS_SUCCESS;
486}
487
Ishank Jain5122f8f2017-03-15 22:22:47 +0530488void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
489{
490 return;
491}
Venkata Sharath Chandra Manchala35503cc2017-04-06 15:30:54 -0700492
493QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
494 uint16_t num_elem)
495{
496 return QDF_STATUS_SUCCESS;
497}
498
499void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
500{
501 return;
502}
Ishank Jain5122f8f2017-03-15 22:22:47 +0530503#endif