/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);

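/*
 * dp_rx_desc_pool_alloc() - allocate the SW rx descriptor pool from multiple
 *			     memory pages and link the elements into a freelist
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @num_elem: number of rx descriptors to allocate
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on allocation
 *	   failure, QDF_STATUS_E_FAULT on page link failure
 */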
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t id, page_id, offset, desc_size, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
					  desc_size, num_elem, 0, true);
		if (!rx_desc_pool->desc_pages.num_pages) {
			qdf_err("Multi page alloc fail,size=%d, elem=%d",
				desc_size, num_elem);
			return QDF_STATUS_E_NOMEM;
		}
	}

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link,size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = num_elem;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * The cookie layout below matches the REO destination ring
		 * field reo_destination_ring -> buffer_addr_info ->
		 * sw_buffer_cookie (21 bits wide); the SW encoding uses
		 * 20 of those bits:
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
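		/*
		 * Worked example, assuming DP_RX_DESC_PAGE_ID_SHIFT == 8 and
		 * DP_RX_DESC_POOL_ID_SHIFT == 16 (the layout described
		 * above): pool_id 1, page_id 2, offset 3 encodes as
		 * (1 << 16) | (2 << 8) | 3 = 0x10203.
		 */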
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

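/*
 * dp_rx_desc_find() - locate a SW rx descriptor element by page ID and
 *		       offset within the multi-page descriptor pool
 *
 * @page_id: page index decoded from the cookie
 * @offset: element index within the page, decoded from the cookie
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: pointer to the descriptor list element at (page_id, offset)
 */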
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

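/*
 * __dp_rx_desc_nbuf_free() - free the network buffers attached to in-use
 *			      descriptors, unmapping any buffer that is still
 *			      DMA mapped
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL if the pool has
 *	   no pages allocated
 */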
static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
					 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		if (rx_desc->in_use) {
			nbuf = rx_desc->nbuf;
			if (!rx_desc->unmapped) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);
				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_BIDIRECTIONAL);
			}
			qdf_nbuf_free(nbuf);
		}
	}

	return QDF_STATUS_SUCCESS;
}

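/*
 * dp_rx_desc_nbuf_and_pool_free() - free the attached network buffers and
 *				     then release the descriptor pool pages
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */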
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS qdf_status;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	if (QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_rx_desc_pool_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

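/*
 * dp_rx_desc_nbuf_free() - free only the attached network buffers; the
 *			    descriptor pool pages remain allocated
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */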
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	__dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

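/*
 * dp_rx_desc_pool_free() - release the multi-page memory backing the
 *			    descriptor pool
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */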
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;
	qdf_mem_multi_pages_free(soc->osdev,
				 &rx_desc_pool->desc_pages, 0, true);
}
#else
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t i;

	if (!dp_is_soc_reinit(soc)) {
		rx_desc_pool->array =
			qdf_mem_malloc(pool_size *
				       sizeof(union dp_rx_desc_list_elem_t));

		if (!(rx_desc_pool->array)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
				  "%s: RX Desc Pool[%d] allocation failed",
				  __func__, pool_id);
			return QDF_STATUS_E_NOMEM;
		}
	}

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
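		/*
		 * Cookie layout for the single-array pool: the low 18 bits
		 * hold the descriptor index i and the pool ID sits above
		 * bit 18, so the cookie can be decoded back into
		 * (pool_id, index) when the buffer completes.
		 */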
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;
}

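/*
 * dp_rx_desc_nbuf_and_pool_free() - free the attached network buffers and
 *				     then release the descriptor array
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */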
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);

				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_FROM_DEVICE);
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

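/*
 * dp_rx_desc_nbuf_free() - free only the attached network buffers; the
 *			    descriptor array remains allocated
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */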
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);

				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_FROM_DEVICE);
			}

			qdf_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

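/*
 * dp_rx_desc_pool_free() - release the descriptor array memory
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */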
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: set to the last descriptor of the returned list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

/*
 * dp_rx_add_desc_list_to_free_list() - return the unused descriptors in a
 *					local desc list back to the freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last descriptor of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}