Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 1 | /* |
Jinwei Chen | 3222184 | 2020-01-16 19:52:41 +0800 | [diff] [blame] | 2 | * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 3 | * |
| 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 16 | * PERFORMANCE OF THIS SOFTWARE. |
| 17 | */ |
| 18 | |
| 19 | #include "dp_types.h" |
| 20 | #include "dp_rx.h" |
jiad | 3b8104b | 2019-03-08 17:23:35 +0800 | [diff] [blame] | 21 | #include "dp_ipa.h" |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 22 | |
Varun Reddy Yeturu | a7c21dc | 2019-05-16 14:03:46 -0700 | [diff] [blame] | 23 | #ifdef RX_DESC_MULTI_PAGE_ALLOC |
/*
 * The rx descriptor cookie encodes the element's offset within a page in
 * DP_RX_DESC_PAGE_ID_SHIFT bits (see the cookie layout comment in
 * dp_rx_desc_pool_alloc()). Fail the build if a single page could ever
 * hold more descriptor elements than that offset field can address.
 */
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);
| 27 | |
/**
 * dp_rx_desc_pool_alloc() - allocate and initialize a multi-page rx
 *			     descriptor pool and build its freelist.
 * @soc: core txrx main context
 * @pool_id: pool id, encoded into each descriptor's cookie
 * @num_elem: number of rx descriptor elements to provision
 * @rx_desc_pool: rx descriptor pool to populate
 *
 * Pages are only allocated on a fresh attach; on SoC reinit
 * (dp_is_soc_reinit()) the previously allocated pages are assumed to
 * still be present and are relinked/reinitialized in place.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM if the page
 * allocation fails, QDF_STATUS_E_FAULT if linking the elements overflows.
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t id, page_id, offset, desc_size, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	/* Fresh attach: allocate the backing pages. On reinit the pages
	 * from the previous attach are reused without reallocation.
	 */
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
					  desc_size, num_elem, 0, true);
		if (!rx_desc_pool->desc_pages.num_pages) {
			qdf_err("Multi page alloc fail,size=%d, elem=%d",
				desc_size, num_elem);
			return QDF_STATUS_E_NOMEM;
		}
	}

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	/* Freelist head is the first element of the first cacheable page */
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;
	/* Chain all elements across pages into a single linked list */
	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link,size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = num_elem;

	/* Walk the freshly linked freelist and stamp every descriptor with
	 * its cookie, owning pool id, and a cleared in_use flag.
	 */
	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * Below cookie size is from REO destination ring
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	/* Link overflow: release the pages allocated (or inherited) above */
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}
| 92 | |
| 93 | union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset, |
| 94 | struct rx_desc_pool *rx_desc_pool) |
| 95 | { |
| 96 | return rx_desc_pool->desc_pages.cacheable_pages[page_id] + |
| 97 | rx_desc_pool->elem_size * offset; |
| 98 | } |
| 99 | |
| 100 | static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc, |
| 101 | struct rx_desc_pool *rx_desc_pool) |
| 102 | { |
| 103 | uint32_t i, num_desc, page_id, offset, num_desc_per_page; |
| 104 | union dp_rx_desc_list_elem_t *rx_desc_elem; |
| 105 | struct dp_rx_desc *rx_desc; |
| 106 | qdf_nbuf_t nbuf; |
| 107 | |
| 108 | if (qdf_unlikely(!(rx_desc_pool-> |
| 109 | desc_pages.cacheable_pages))) { |
| 110 | qdf_err("No pages found on this desc pool"); |
| 111 | return QDF_STATUS_E_INVAL; |
| 112 | } |
| 113 | num_desc = rx_desc_pool->pool_size; |
| 114 | num_desc_per_page = |
| 115 | rx_desc_pool->desc_pages.num_element_per_page; |
| 116 | for (i = 0; i < num_desc; i++) { |
| 117 | page_id = i / num_desc_per_page; |
| 118 | offset = i % num_desc_per_page; |
| 119 | rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool); |
| 120 | rx_desc = &rx_desc_elem->rx_desc; |
| 121 | if (rx_desc->in_use) { |
| 122 | nbuf = rx_desc->nbuf; |
| 123 | if (!rx_desc->unmapped) { |
| 124 | dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, |
| 125 | false); |
| 126 | qdf_nbuf_unmap_single(soc->osdev, nbuf, |
| 127 | QDF_DMA_BIDIRECTIONAL); |
| 128 | } |
| 129 | qdf_nbuf_free(nbuf); |
| 130 | } |
| 131 | } |
| 132 | |
| 133 | return QDF_STATUS_SUCCESS; |
| 134 | } |
| 135 | |
| 136 | void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id, |
| 137 | struct rx_desc_pool *rx_desc_pool) |
| 138 | { |
| 139 | QDF_STATUS qdf_status; |
| 140 | |
| 141 | qdf_spin_lock_bh(&rx_desc_pool->lock); |
| 142 | qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool); |
| 143 | if (QDF_IS_STATUS_SUCCESS(qdf_status)) |
| 144 | dp_rx_desc_pool_free(soc, rx_desc_pool); |
| 145 | qdf_spin_unlock_bh(&rx_desc_pool->lock); |
| 146 | |
| 147 | qdf_spinlock_destroy(&rx_desc_pool->lock); |
| 148 | } |
| 149 | |
/**
 * dp_rx_desc_nbuf_free() - free all network buffers held by the pool while
 *			    keeping the descriptor pages allocated.
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool to scrub
 *
 * Destroys the pool lock on exit, so the pool must not be used for
 * descriptor replenish after this call.
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	/* Return status intentionally ignored: if the pool has no pages
	 * there is nothing further to clean up here anyway.
	 */
	__dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
| 159 | |
| 160 | void dp_rx_desc_pool_free(struct dp_soc *soc, |
| 161 | struct rx_desc_pool *rx_desc_pool) |
| 162 | { |
| 163 | if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) |
| 164 | return; |
| 165 | qdf_mem_multi_pages_free(soc->osdev, |
| 166 | &rx_desc_pool->desc_pages, 0, true); |
| 167 | } |
| 168 | #else |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 169 | QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id, |
| 170 | uint32_t pool_size, struct rx_desc_pool *rx_desc_pool) |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 171 | { |
| 172 | uint32_t i; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 173 | |
phadiman | 449a268 | 2019-02-20 14:00:00 +0530 | [diff] [blame] | 174 | if (!dp_is_soc_reinit(soc)) { |
| 175 | rx_desc_pool->array = |
| 176 | qdf_mem_malloc(pool_size * |
| 177 | sizeof(union dp_rx_desc_list_elem_t)); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 178 | |
phadiman | 449a268 | 2019-02-20 14:00:00 +0530 | [diff] [blame] | 179 | if (!(rx_desc_pool->array)) { |
| 180 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, |
| 181 | "%s: RX Desc Pool[%d] allocation failed", |
| 182 | __func__, pool_id); |
| 183 | return QDF_STATUS_E_NOMEM; |
| 184 | } |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 185 | } |
| 186 | |
Venkata Sharath Chandra Manchala | bcfa6d8 | 2018-03-28 23:42:39 -0700 | [diff] [blame] | 187 | /* Initialize the lock */ |
| 188 | qdf_spinlock_create(&rx_desc_pool->lock); |
| 189 | |
| 190 | qdf_spin_lock_bh(&rx_desc_pool->lock); |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 191 | rx_desc_pool->pool_size = pool_size; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 192 | |
| 193 | /* link SW rx descs into a freelist */ |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 194 | rx_desc_pool->freelist = &rx_desc_pool->array[0]; |
Varsha Mishra | 10b86d6 | 2019-09-04 14:11:57 +0530 | [diff] [blame] | 195 | for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) { |
| 196 | if (i == rx_desc_pool->pool_size - 1) |
| 197 | rx_desc_pool->array[i].next = NULL; |
| 198 | else |
| 199 | rx_desc_pool->array[i].next = |
| 200 | &rx_desc_pool->array[i + 1]; |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 201 | rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18); |
| 202 | rx_desc_pool->array[i].rx_desc.pool_id = pool_id; |
Pramod Simha | 59fcb31 | 2017-06-22 17:43:16 -0700 | [diff] [blame] | 203 | rx_desc_pool->array[i].rx_desc.in_use = 0; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 204 | } |
| 205 | |
Venkata Sharath Chandra Manchala | bcfa6d8 | 2018-03-28 23:42:39 -0700 | [diff] [blame] | 206 | qdf_spin_unlock_bh(&rx_desc_pool->lock); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 207 | return QDF_STATUS_SUCCESS; |
| 208 | } |
| 209 | |
Varun Reddy Yeturu | a7c21dc | 2019-05-16 14:03:46 -0700 | [diff] [blame] | 210 | void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id, |
| 211 | struct rx_desc_pool *rx_desc_pool) |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 212 | { |
jiad | 3b8104b | 2019-03-08 17:23:35 +0800 | [diff] [blame] | 213 | qdf_nbuf_t nbuf; |
Manikandan Mohan | b01696b | 2017-05-09 18:03:19 -0700 | [diff] [blame] | 214 | int i; |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 215 | |
Venkata Sharath Chandra Manchala | bcfa6d8 | 2018-03-28 23:42:39 -0700 | [diff] [blame] | 216 | qdf_spin_lock_bh(&rx_desc_pool->lock); |
Manikandan Mohan | b01696b | 2017-05-09 18:03:19 -0700 | [diff] [blame] | 217 | for (i = 0; i < rx_desc_pool->pool_size; i++) { |
jiad | 78e3445 | 2017-11-25 17:32:36 +0800 | [diff] [blame] | 218 | if (rx_desc_pool->array[i].rx_desc.in_use) { |
jiad | 3b8104b | 2019-03-08 17:23:35 +0800 | [diff] [blame] | 219 | nbuf = rx_desc_pool->array[i].rx_desc.nbuf; |
| 220 | |
| 221 | if (!(rx_desc_pool->array[i].rx_desc.unmapped)) { |
| 222 | dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, |
| 223 | false); |
| 224 | |
| 225 | qdf_nbuf_unmap_single(soc->osdev, nbuf, |
Ankit Kumar | 0ae4abc | 2019-05-02 15:08:42 +0530 | [diff] [blame] | 226 | QDF_DMA_FROM_DEVICE); |
jiad | 3b8104b | 2019-03-08 17:23:35 +0800 | [diff] [blame] | 227 | } |
| 228 | qdf_nbuf_free(nbuf); |
jiad | 78e3445 | 2017-11-25 17:32:36 +0800 | [diff] [blame] | 229 | } |
Manikandan Mohan | b01696b | 2017-05-09 18:03:19 -0700 | [diff] [blame] | 230 | } |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 231 | qdf_mem_free(rx_desc_pool->array); |
Venkata Sharath Chandra Manchala | bcfa6d8 | 2018-03-28 23:42:39 -0700 | [diff] [blame] | 232 | qdf_spin_unlock_bh(&rx_desc_pool->lock); |
| 233 | qdf_spinlock_destroy(&rx_desc_pool->lock); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 234 | } |
| 235 | |
Varun Reddy Yeturu | a7c21dc | 2019-05-16 14:03:46 -0700 | [diff] [blame] | 236 | void dp_rx_desc_nbuf_free(struct dp_soc *soc, |
| 237 | struct rx_desc_pool *rx_desc_pool) |
phadiman | 449a268 | 2019-02-20 14:00:00 +0530 | [diff] [blame] | 238 | { |
jiad | 3b8104b | 2019-03-08 17:23:35 +0800 | [diff] [blame] | 239 | qdf_nbuf_t nbuf; |
phadiman | 449a268 | 2019-02-20 14:00:00 +0530 | [diff] [blame] | 240 | int i; |
| 241 | |
| 242 | qdf_spin_lock_bh(&rx_desc_pool->lock); |
| 243 | for (i = 0; i < rx_desc_pool->pool_size; i++) { |
| 244 | if (rx_desc_pool->array[i].rx_desc.in_use) { |
jiad | 3b8104b | 2019-03-08 17:23:35 +0800 | [diff] [blame] | 245 | nbuf = rx_desc_pool->array[i].rx_desc.nbuf; |
| 246 | |
| 247 | if (!(rx_desc_pool->array[i].rx_desc.unmapped)) { |
| 248 | dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, |
| 249 | false); |
| 250 | |
| 251 | qdf_nbuf_unmap_single(soc->osdev, nbuf, |
Ankit Kumar | 0ae4abc | 2019-05-02 15:08:42 +0530 | [diff] [blame] | 252 | QDF_DMA_FROM_DEVICE); |
jiad | 3b8104b | 2019-03-08 17:23:35 +0800 | [diff] [blame] | 253 | } |
| 254 | |
| 255 | qdf_nbuf_free(nbuf); |
phadiman | 449a268 | 2019-02-20 14:00:00 +0530 | [diff] [blame] | 256 | } |
| 257 | } |
| 258 | qdf_spin_unlock_bh(&rx_desc_pool->lock); |
| 259 | qdf_spinlock_destroy(&rx_desc_pool->lock); |
| 260 | } |
| 261 | |
/**
 * dp_rx_desc_pool_free() - free the memory backing the rx descriptor array.
 * @soc: core txrx main context (unused in this variant)
 * @rx_desc_pool: rx descriptor pool whose array is released
 *
 * Caller must have unmapped/freed any in-use buffers first (see
 * dp_rx_desc_nbuf_free()). Presumably safe on a NULL array (qdf_mem_free
 * is expected to mirror kfree semantics) — confirm against the QDF impl.
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}
Varun Reddy Yeturu | a7c21dc | 2019-05-16 14:03:46 -0700 | [diff] [blame] | 267 | #endif /* RX_DESC_MULTI_PAGE_ALLOC */ |
phadiman | 7dd261d | 2019-03-15 01:48:50 +0530 | [diff] [blame] | 268 | /* |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 269 | * dp_rx_get_free_desc_list() - provide a list of descriptors from |
| 270 | * the free rx desc pool. |
| 271 | * |
| 272 | * @soc: core txrx main context |
| 273 | * @pool_id: pool_id which is one of 3 mac_ids |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 274 | * @rx_desc_pool: rx descriptor pool pointer |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 275 | * @num_descs: number of descs requested from freelist |
| 276 | * @desc_list: attach the descs to this list (output parameter) |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 277 | * @tail: attach the point to last desc of free list (output parameter) |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 278 | * |
| 279 | * Return: number of descs allocated from free list. |
| 280 | */ |
| 281 | uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id, |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 282 | struct rx_desc_pool *rx_desc_pool, |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 283 | uint16_t num_descs, |
| 284 | union dp_rx_desc_list_elem_t **desc_list, |
| 285 | union dp_rx_desc_list_elem_t **tail) |
| 286 | { |
| 287 | uint16_t count; |
| 288 | |
Venkata Sharath Chandra Manchala | bcfa6d8 | 2018-03-28 23:42:39 -0700 | [diff] [blame] | 289 | qdf_spin_lock_bh(&rx_desc_pool->lock); |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 290 | |
Chaithanya Garrepalli | 8e5e2f8 | 2018-03-08 12:55:46 +0530 | [diff] [blame] | 291 | *desc_list = *tail = rx_desc_pool->freelist; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 292 | |
| 293 | for (count = 0; count < num_descs; count++) { |
| 294 | |
Kai Chen | 6eca1a6 | 2017-01-12 10:17:53 -0800 | [diff] [blame] | 295 | if (qdf_unlikely(!rx_desc_pool->freelist)) { |
Venkata Sharath Chandra Manchala | bcfa6d8 | 2018-03-28 23:42:39 -0700 | [diff] [blame] | 296 | qdf_spin_unlock_bh(&rx_desc_pool->lock); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 297 | return count; |
| 298 | } |
Chaithanya Garrepalli | 8e5e2f8 | 2018-03-08 12:55:46 +0530 | [diff] [blame] | 299 | *tail = rx_desc_pool->freelist; |
| 300 | rx_desc_pool->freelist = rx_desc_pool->freelist->next; |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 301 | } |
Karunakar Dasineni | 87f0c5d | 2017-10-29 21:54:21 -0700 | [diff] [blame] | 302 | (*tail)->next = NULL; |
Venkata Sharath Chandra Manchala | bcfa6d8 | 2018-03-28 23:42:39 -0700 | [diff] [blame] | 303 | qdf_spin_unlock_bh(&rx_desc_pool->lock); |
Debashis Dutt | c4c52dc | 2016-10-04 17:12:23 -0700 | [diff] [blame] | 304 | return count; |
| 305 | } |
| 306 | |
/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 * freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller; prepended to
 *		     the pool freelist and set as its new head
 * @tail: pointer to the last desc of the local desc list; cleared to NULL
 *	  on return so the caller cannot reuse the handed-back chain
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * NOTE(review): *tail is dereferenced unconditionally (both in the trace
 * and when splicing), so callers presumably never pass an empty list —
 * confirm at call sites.
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);


	/* Save the current freelist head; the trace below must read
	 * (*tail)->next before it is overwritten by the splice.
	 */
	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		"temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		temp_list, *local_desc_list, *tail, (*tail)->next);
	/* Prepend the caller's chain: its head becomes the new freelist
	 * head and its tail links to the old head.
	 */
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}