Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1 | /* |
Kai Liu | b8e1241 | 2018-01-12 16:52:26 +0800 | [diff] [blame] | 2 | * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 3 | * |
| 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 16 | * PERFORMANCE OF THIS SOFTWARE. |
| 17 | */ |
| 18 | |
Balamurugan Mahalingam | 5d80641 | 2018-07-30 18:04:15 +0530 | [diff] [blame] | 19 | #include "hal_api.h" |
Balamurugan Mahalingam | f72cb1f | 2018-06-25 12:18:34 +0530 | [diff] [blame] | 20 | #include "hal_hw_headers.h" |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 21 | #include "hal_reo.h" |
| 22 | #include "hal_tx.h" |
Balamurugan Mahalingam | 5d80641 | 2018-07-30 18:04:15 +0530 | [diff] [blame] | 23 | #include "hal_rx.h" |
Pratik Gandhi | dc82a77 | 2018-01-30 18:57:05 +0530 | [diff] [blame] | 24 | #include "qdf_module.h" |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 25 | |
Balamurugan Mahalingam | 5d80641 | 2018-07-30 18:04:15 +0530 | [diff] [blame] | 26 | /* TODO: See if the following definition is available in HW headers */ |
| 27 | #define HAL_REO_OWNED 4 |
| 28 | #define HAL_REO_QUEUE_DESC 8 |
| 29 | #define HAL_REO_QUEUE_EXT_DESC 9 |
| 30 | |
| 31 | /* TODO: Using associated link desc counter 1 for Rx. Check with FW on |
| 32 | * how these counters are assigned |
| 33 | */ |
| 34 | #define HAL_RX_LINK_DESC_CNTR 1 |
| 35 | /* TODO: Following definition should be from HW headers */ |
| 36 | #define HAL_DESC_REO_OWNED 4 |
| 37 | |
/**
 * hal_uniform_desc_hdr_setup - program the UNIFORM_DESCRIPTOR_HEADER
 *	(owner and buffer type) fields of a HW descriptor
 * @desc: pointer to the first dword of the descriptor
 * @owner: owner info (e.g. HAL_DESC_REO_OWNED)
 * @buffer_type: descriptor type (e.g. HAL_REO_QUEUE_DESC,
 *	HAL_REO_QUEUE_EXT_DESC)
 */
static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner,
		uint32_t buffer_type)
{
	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER,
		owner);
	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE,
		buffer_type);
}
| 51 | |
| 52 | #ifndef TID_TO_WME_AC |
| 53 | #define WME_AC_BE 0 /* best effort */ |
| 54 | #define WME_AC_BK 1 /* background */ |
| 55 | #define WME_AC_VI 2 /* video */ |
| 56 | #define WME_AC_VO 3 /* voice */ |
| 57 | |
| 58 | #define TID_TO_WME_AC(_tid) ( \ |
| 59 | (((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ |
| 60 | (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ |
| 61 | (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ |
| 62 | WME_AC_VO) |
| 63 | #endif |
| 64 | #define HAL_NON_QOS_TID 16 |
| 65 | |
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID the queue belongs to (HAL_NON_QOS_TID for the non-QoS queue)
 * @ba_window_size: BlockAck window size (clamped to a minimum of 1 below)
 * @start_seq: Starting sequence number (programmed as SSN only when <= 0xfff)
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 *	(NOTE(review): currently not referenced in this function)
 * @pn_type: PN check type (HAL_PN_WPA / HAL_PN_WAPI_EVEN /
 *	HAL_PN_WAPI_UNEVEN enable PN checking; anything else disables it)
 *
 */
void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	/* Descriptor header: owned by REO HW, type = REO queue descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
		HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
		RESERVED_0A, 0xDDBEEF);

	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
		RECEIVE_QUEUE_NUMBER, tid);
	/* Mark the queue descriptor as valid */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	/* Access category derived from TID (see TID_TO_WME_AC above) */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;
	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	if ((ba_window_size == 1) && (tid != HAL_NON_QOS_TID))
		ba_window_size++;
	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	/* HW field is (window size - 1) */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
		ba_window_size - 1);

	/* Map PN type to the PN-check enable and PN size fields */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
		pn_enable);

	/* WAPI requires even/odd PN enforcement depending on key index */
	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			PN_SHALL_BE_UNEVEN, 1);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_HANDLING_ENABLE,
		pn_enable);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
		pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		IGNORE_AMPDU_FLAG, 1);

	/* SSN field is 12 bits; only program in-range start sequence numbers */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
			start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set start PN for WAPI */

#ifdef notyet
	/* Setup first queue extension if BA window size is more than 1 */
	if (ba_window_size > 1) {
		reo_queue_ext_desc =
			(uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) +
			1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup second queue extension if BA window size is more than 105 */
	if (ba_window_size > 105) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup third queue extension if BA window size is more than 210 */
	if (ba_window_size > 210) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
#else
	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid != HAL_NON_QOS_TID) {
		/* Extension descriptors follow the base descriptor in the
		 * same allocation; initialize all three up front.
		 */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue *)reo_queue_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc, 3 *
			sizeof(struct rx_reo_queue_ext));
		/* Initialize first reo queue extension descriptor */
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF);
		/* Initialize second reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF);
		/* Initialize third reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF);
	}
#endif
}
qdf_export_symbol(hal_reo_qdesc_setup);
| 250 | |
Nandha Kishore Easwaran | e6a27f7 | 2018-09-01 23:04:33 +0530 | [diff] [blame] | 251 | /** |
| 252 | * hal_get_ba_aging_timeout - Get BA Aging timeout |
| 253 | * |
| 254 | * @hal_soc: Opaque HAL SOC handle |
| 255 | * @ac: Access category |
| 256 | * @value: window size to get |
| 257 | */ |
| 258 | void hal_get_ba_aging_timeout(void *hal_soc, uint8_t ac, |
| 259 | uint32_t *value) |
| 260 | { |
| 261 | struct hal_soc *soc = (struct hal_soc *)hal_soc; |
| 262 | |
| 263 | switch (ac) { |
| 264 | case WME_AC_BE: |
| 265 | *value = HAL_REG_READ(soc, |
| 266 | HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( |
| 267 | SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; |
| 268 | break; |
| 269 | case WME_AC_BK: |
| 270 | *value = HAL_REG_READ(soc, |
| 271 | HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( |
| 272 | SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; |
| 273 | break; |
| 274 | case WME_AC_VI: |
| 275 | *value = HAL_REG_READ(soc, |
| 276 | HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( |
| 277 | SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; |
| 278 | break; |
| 279 | case WME_AC_VO: |
| 280 | *value = HAL_REG_READ(soc, |
| 281 | HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( |
| 282 | SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; |
| 283 | break; |
| 284 | default: |
| 285 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 286 | "Invalid AC: %d\n", ac); |
| 287 | } |
| 288 | } |
| 289 | |
| 290 | qdf_export_symbol(hal_get_ba_aging_timeout); |
| 291 | |
| 292 | /** |
| 293 | * hal_set_ba_aging_timeout - Set BA Aging timeout |
| 294 | * |
| 295 | * @hal_soc: Opaque HAL SOC handle |
| 296 | * @ac: Access category |
| 297 | * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice |
| 298 | * @value: Input value to set |
| 299 | */ |
| 300 | void hal_set_ba_aging_timeout(void *hal_soc, uint8_t ac, |
| 301 | uint32_t value) |
| 302 | { |
| 303 | struct hal_soc *soc = (struct hal_soc *)hal_soc; |
| 304 | |
| 305 | switch (ac) { |
| 306 | case WME_AC_BE: |
| 307 | HAL_REG_WRITE(soc, |
| 308 | HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( |
| 309 | SEQ_WCSS_UMAC_REO_REG_OFFSET), |
| 310 | value * 1000); |
| 311 | break; |
| 312 | case WME_AC_BK: |
| 313 | HAL_REG_WRITE(soc, |
| 314 | HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( |
| 315 | SEQ_WCSS_UMAC_REO_REG_OFFSET), |
| 316 | value * 1000); |
| 317 | break; |
| 318 | case WME_AC_VI: |
| 319 | HAL_REG_WRITE(soc, |
| 320 | HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( |
| 321 | SEQ_WCSS_UMAC_REO_REG_OFFSET), |
| 322 | value * 1000); |
| 323 | break; |
| 324 | case WME_AC_VO: |
| 325 | HAL_REG_WRITE(soc, |
| 326 | HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( |
| 327 | SEQ_WCSS_UMAC_REO_REG_OFFSET), |
| 328 | value * 1000); |
| 329 | break; |
| 330 | default: |
| 331 | QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, |
| 332 | "Invalid AC: %d\n", ac); |
| 333 | } |
| 334 | } |
| 335 | |
| 336 | qdf_export_symbol(hal_set_ba_aging_timeout); |
| 337 | |
/* Only 4 REO cache blocking resources exist; restrict bit searches to them */
#define BLOCK_RES_MASK	0xF

/**
 * hal_find_one_bit - find the least significant set bit within
 *	BLOCK_RES_MASK
 * @x: bitmap to search
 *
 * Return: zero-based position of the lowest set bit, or 0xFF (uint8_t
 * wrap of -1) when no bit inside BLOCK_RES_MASK is set.
 */
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	/* x & (~x + 1) isolates the lowest set bit */
	uint8_t lsb = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t idx = 0;

	while (lsb) {
		idx++;
		lsb >>= 1;
	}

	return idx - 1;
}

/**
 * hal_find_zero_bit - find the least significant clear bit within
 *	BLOCK_RES_MASK
 * @x: bitmap to search
 *
 * Return: zero-based position of the lowest clear bit, or 0xFF (uint8_t
 * wrap of -1) when every bit inside BLOCK_RES_MASK is set.
 */
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	/* ~x & (x + 1) isolates the lowest clear bit */
	uint8_t lsb = (~x & (x + 1)) & BLOCK_RES_MASK;
	uint8_t idx = 0;

	while (lsb) {
		idx++;
		lsb >>= 1;
	}

	return idx - 1;
}
| 360 | |
/**
 * hal_reo_cmd_set_descr_addr - program the target descriptor physical
 *	address fields of a REO command descriptor
 * @reo_desc: REO command descriptor (pointing past the TLV header)
 * @type: REO command being built; selects which field pair to program
 * @paddr_lo: bits 31..0 of the target descriptor physical address
 * @paddr_hi: bits 39..32 of the target descriptor physical address
 */
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
			RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
			FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
			FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type", __func__);
		break;
	}
}
| 397 | |
/**
 * hal_reo_cmd_queue_stats - post a GET_QUEUE_STATS command on the REO
 *	command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SOC handle
 * @cmd: command parameters; std.addr_lo/addr_hi give the REO queue
 *	descriptor physical address, u.stats_params.clear requests the
 *	stats to be cleared after reading
 *
 * Return: the REO_CMD_NUMBER field read back from the command header on
 * success, -EBUSY when no command ring entry is available.
 */
inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)

{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
		sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero the command body while leaving the command header dwords
	 * untouched */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_get_queue_stats) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
		cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
		cmd->u.stats_params.clear);

	hal_srng_access_end(soc, reo_ring);

	/* Read back the command number assigned in the header dword */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_queue_stats);
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 440 | |
/**
 * hal_reo_cmd_flush_queue - post a FLUSH_QUEUE command on the REO
 *	command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SOC handle
 * @cmd: command parameters; std.addr_lo/addr_hi give the queue descriptor
 *	physical address, u.fl_queue_params carries block-resource options
 *
 * Return: the REO_CMD_NUMBER field read back from the command header on
 * success, -EBUSY when no command ring entry is available.
 */
inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
		sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero the command body while leaving the command header dwords
	 * untouched */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_flush_queue) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	/* Program the blocking resource index only when blocking after
	 * flush was requested */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_queue);
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 486 | |
/**
 * hal_reo_cmd_flush_cache - post a FLUSH_CACHE command on the REO
 *	command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SOC handle
 * @cmd: command parameters (u.fl_cache_params); when
 *	block_use_after_flush is set, one of the 4 HW cache blocking
 *	resources is claimed for this flush
 *
 * Return: the REO_CMD_NUMBER field read back from the command header on
 * success, -EBUSY when no blocking resource or command ring entry is
 * available.
 */
inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(soc, reo_ring);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!",
				__func__);
			hal_srng_access_end(soc, reo_ring);
			return -EBUSY;
		}
		/* Remember which blocking resource this flush used.
		 * NOTE(review): reo_res_bitmap is not updated here -
		 * presumably the bit is set when the flush status is
		 * processed; confirm against the status handler.
		 */
		soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		hal_srng_dump(reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
		sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero the command body while leaving the command header dwords
	 * untouched */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_flush_cache) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_cache);
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 566 | |
/**
 * hal_reo_cmd_unblock_cache - post an UNBLOCK_CACHE command on the REO
 *	command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SOC handle
 * @cmd: command parameters (u.unblk_cache_params); for
 *	UNBLOCK_RES_INDEX, .index identifies the blocking resource to
 *	release
 *
 * Return: the REO_CMD_NUMBER field read back from the command header on
 * success, -EBUSY when no blocked resource exists or no command ring
 * entry is available.
 */
inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)

{
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(soc, reo_ring);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		/* Check that at least one blocking resource is in use.
		 * NOTE(review): the descriptor below is programmed with the
		 * caller-supplied index, not this computed one - the lookup
		 * here only validates that something can be unblocked;
		 * confirm that is intended.
		 */
		index = hal_find_one_bit(soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(soc, reo_ring);
			qdf_print("%s: No blocking resource to unblock!",
				__func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
		sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero the command body while leaving the command header dwords
	 * untouched */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_unblock_cache) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_unblock_cache);
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 622 | |
/**
 * hal_reo_cmd_flush_timeout_list - post a FLUSH_TIMEOUT_LIST command on
 *	the REO command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SOC handle
 * @cmd: command parameters (u.fl_tim_list_params): .ac_list selects the
 *	per-AC timeout list, .min_rel_desc and .min_fwd_buf give the
 *	minimum release-descriptor and forward-buffer counts
 *
 * Return: the REO_CMD_NUMBER field read back from the command header on
 * success, -EBUSY when no command ring entry is available.
 */
inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
		sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero the command body while leaving the command header dwords
	 * untouched */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_flush_timeout_list) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 667 | |
| 668 | inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc, |
| 669 | struct hal_reo_cmd_params *cmd) |
| 670 | { |
| 671 | uint32_t *reo_desc, val; |
| 672 | struct hal_reo_cmd_update_queue_params *p; |
| 673 | |
| 674 | p = &cmd->u.upd_queue_params; |
| 675 | |
| 676 | hal_srng_access_start(soc, reo_ring); |
| 677 | reo_desc = hal_srng_src_get_next(soc, reo_ring); |
Karunakar Dasineni | a8c779b | 2017-01-11 13:57:55 -0800 | [diff] [blame] | 678 | if (!reo_desc) { |
| 679 | QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, |
Aditya Sathish | ded018e | 2018-07-02 16:25:21 +0530 | [diff] [blame] | 680 | "%s: Out of cmd ring entries", __func__); |
Karunakar Dasineni | a8c779b | 2017-01-11 13:57:55 -0800 | [diff] [blame] | 681 | hal_srng_access_end(soc, reo_ring); |
| 682 | return -EBUSY; |
| 683 | } |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 684 | |
| 685 | HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E, |
| 686 | sizeof(struct reo_update_rx_reo_queue)); |
| 687 | |
| 688 | /* Offsets of descriptor fields defined in HW headers start from |
| 689 | * the field after TLV header */ |
| 690 | reo_desc += (sizeof(struct tlv_32_hdr) >> 2); |
Karunakar Dasineni | 6a52675 | 2018-08-02 08:56:19 -0700 | [diff] [blame] | 691 | qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), |
| 692 | sizeof(struct reo_update_rx_reo_queue) - |
| 693 | (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 694 | |
| 695 | HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, |
| 696 | REO_STATUS_REQUIRED, cmd->std.need_status); |
| 697 | |
| 698 | hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE, |
| 699 | cmd->std.addr_lo, cmd->std.addr_hi); |
| 700 | |
| 701 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 702 | UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num); |
| 703 | |
| 704 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD, |
| 705 | p->update_vld); |
| 706 | |
| 707 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 708 | UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER, |
| 709 | p->update_assoc_link_desc); |
| 710 | |
| 711 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 712 | UPDATE_DISABLE_DUPLICATE_DETECTION, |
| 713 | p->update_disable_dup_detect); |
| 714 | |
| 715 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 716 | UPDATE_DISABLE_DUPLICATE_DETECTION, |
| 717 | p->update_disable_dup_detect); |
| 718 | |
| 719 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 720 | UPDATE_SOFT_REORDER_ENABLE, |
| 721 | p->update_soft_reorder_enab); |
| 722 | |
| 723 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 724 | UPDATE_AC, p->update_ac); |
| 725 | |
| 726 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 727 | UPDATE_BAR, p->update_bar); |
| 728 | |
| 729 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 730 | UPDATE_BAR, p->update_bar); |
| 731 | |
| 732 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 733 | UPDATE_RTY, p->update_rty); |
| 734 | |
| 735 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 736 | UPDATE_CHK_2K_MODE, p->update_chk_2k_mode); |
| 737 | |
| 738 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 739 | UPDATE_OOR_MODE, p->update_oor_mode); |
| 740 | |
| 741 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 742 | UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size); |
| 743 | |
| 744 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 745 | UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed); |
| 746 | |
| 747 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 748 | UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even); |
| 749 | |
| 750 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 751 | UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven); |
| 752 | |
| 753 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 754 | UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab); |
| 755 | |
| 756 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 757 | UPDATE_PN_SIZE, p->update_pn_size); |
| 758 | |
| 759 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 760 | UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu); |
| 761 | |
| 762 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 763 | UPDATE_SVLD, p->update_svld); |
| 764 | |
| 765 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 766 | UPDATE_SSN, p->update_ssn); |
| 767 | |
| 768 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 769 | UPDATE_SEQ_2K_ERROR_DETECTED_FLAG, |
| 770 | p->update_seq_2k_err_detect); |
| 771 | |
| 772 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 773 | UPDATE_PN_VALID, p->update_pn_valid); |
| 774 | |
| 775 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, |
| 776 | UPDATE_PN, p->update_pn); |
| 777 | |
| 778 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 779 | RECEIVE_QUEUE_NUMBER, p->rx_queue_num); |
| 780 | |
| 781 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 782 | VLD, p->vld); |
| 783 | |
| 784 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 785 | ASSOCIATED_LINK_DESCRIPTOR_COUNTER, |
| 786 | p->assoc_link_desc); |
| 787 | |
| 788 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 789 | DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect); |
| 790 | |
| 791 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 792 | SOFT_REORDER_ENABLE, p->soft_reorder_enab); |
| 793 | |
| 794 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac); |
| 795 | |
| 796 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 797 | BAR, p->bar); |
| 798 | |
| 799 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 800 | CHK_2K_MODE, p->chk_2k_mode); |
| 801 | |
| 802 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 803 | RTY, p->rty); |
| 804 | |
| 805 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 806 | OOR_MODE, p->oor_mode); |
| 807 | |
| 808 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 809 | PN_CHECK_NEEDED, p->pn_check_needed); |
| 810 | |
| 811 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 812 | PN_SHALL_BE_EVEN, p->pn_even); |
| 813 | |
| 814 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 815 | PN_SHALL_BE_UNEVEN, p->pn_uneven); |
| 816 | |
| 817 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 818 | PN_HANDLING_ENABLE, p->pn_hand_enab); |
| 819 | |
| 820 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, |
| 821 | IGNORE_AMPDU_FLAG, p->ignore_ampdu); |
| 822 | |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 823 | if (p->ba_window_size < 1) |
| 824 | p->ba_window_size = 1; |
sumedh baikady | df4a57c | 2018-04-08 22:19:22 -0700 | [diff] [blame] | 825 | /* |
| 826 | * WAR to get 2k exception in Non BA case. |
| 827 | * Setting window size to 2 to get 2k jump exception |
| 828 | * when we receive aggregates in Non BA case |
| 829 | */ |
| 830 | if (p->ba_window_size == 1) |
| 831 | p->ba_window_size++; |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 832 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, |
Karunakar Dasineni | 7957fa9 | 2017-02-23 23:05:40 -0800 | [diff] [blame] | 833 | BA_WINDOW_SIZE, p->ba_window_size - 1); |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 834 | |
Gurumoorthi Gnanasambandhan | ed4bcf8 | 2017-05-24 00:10:59 +0530 | [diff] [blame] | 835 | if (p->pn_size == 24) |
| 836 | p->pn_size = PN_SIZE_24; |
| 837 | else if (p->pn_size == 48) |
| 838 | p->pn_size = PN_SIZE_48; |
| 839 | else if (p->pn_size == 128) |
| 840 | p->pn_size = PN_SIZE_128; |
| 841 | |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 842 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, |
| 843 | PN_SIZE, p->pn_size); |
| 844 | |
| 845 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, |
| 846 | SVLD, p->svld); |
| 847 | |
| 848 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, |
| 849 | SSN, p->ssn); |
| 850 | |
| 851 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, |
| 852 | SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect); |
| 853 | |
| 854 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, |
| 855 | PN_ERROR_DETECTED_FLAG, p->pn_err_detect); |
| 856 | |
| 857 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5, |
| 858 | PN_31_0, p->pn_31_0); |
| 859 | |
| 860 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6, |
| 861 | PN_63_32, p->pn_63_32); |
| 862 | |
| 863 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7, |
| 864 | PN_95_64, p->pn_95_64); |
| 865 | |
| 866 | HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8, |
| 867 | PN_127_96, p->pn_127_96); |
| 868 | |
| 869 | hal_srng_access_end(soc, reo_ring); |
| 870 | val = reo_desc[CMD_HEADER_DW_OFFSET]; |
| 871 | return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, |
| 872 | val); |
| 873 | } |
Pratik Gandhi | dc82a77 | 2018-01-30 18:57:05 +0530 | [diff] [blame] | 874 | qdf_export_symbol(hal_reo_cmd_update_rx_queue); |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 875 | |
/**
 * hal_reo_queue_stats_status() - Parse a REO GET_QUEUE_STATS status TLV
 * @reo_desc: pointer to the status TLV in the REO status ring
 * @st: output - parsed queue statistics
 * @hal_soc: HAL SoC handle, passed through to the header parser
 *
 * Extracts the common status header, SSN/current index, PN, timestamps,
 * RX bitmap words and the various MPDU/MSDU/error counters from the HW
 * descriptor into @st.
 */
inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
			     struct hal_reo_queue_status *st,
			     struct hal_soc *hal_soc)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
			&(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits (four 32-bit words covering a 128-bit packet number) */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
					 PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
					 PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
					 PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
					 PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
					 LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
					 LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap (256-bit reorder window, 32 bits per status word) */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
					 RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
					 RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
					 RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
					 RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
					 RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
					 RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
					 RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
					 RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
					 MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
					 MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
					 TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}
qdf_export_symbol(hal_reo_queue_stats_status);
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1068 | |
| 1069 | inline void hal_reo_flush_queue_status(uint32_t *reo_desc, |
Balamurugan Mahalingam | 5d80641 | 2018-07-30 18:04:15 +0530 | [diff] [blame] | 1070 | struct hal_reo_flush_queue_status *st, |
| 1071 | struct hal_soc *hal_soc) |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1072 | { |
| 1073 | uint32_t val; |
| 1074 | |
| 1075 | /* Offsets of descriptor fields defined in HW headers start |
| 1076 | * from the field after TLV header */ |
| 1077 | reo_desc += (sizeof(struct tlv_32_hdr) >> 2); |
| 1078 | |
| 1079 | /* header */ |
Balamurugan Mahalingam | 5d80641 | 2018-07-30 18:04:15 +0530 | [diff] [blame] | 1080 | hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV, |
| 1081 | &(st->header), hal_soc); |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1082 | |
| 1083 | /* error bit */ |
| 1084 | val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2, |
| 1085 | ERROR_DETECTED)]; |
| 1086 | st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED, |
| 1087 | val); |
| 1088 | } |
Pratik Gandhi | dc82a77 | 2018-01-30 18:57:05 +0530 | [diff] [blame] | 1089 | qdf_export_symbol(hal_reo_flush_queue_status); |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1090 | |
| 1091 | inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc, |
Balamurugan Mahalingam | 5d80641 | 2018-07-30 18:04:15 +0530 | [diff] [blame] | 1092 | struct hal_reo_flush_cache_status *st, |
| 1093 | struct hal_soc *hal_soc) |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1094 | { |
| 1095 | uint32_t val; |
| 1096 | |
| 1097 | /* Offsets of descriptor fields defined in HW headers start |
| 1098 | * from the field after TLV header */ |
| 1099 | reo_desc += (sizeof(struct tlv_32_hdr) >> 2); |
| 1100 | |
| 1101 | /* header */ |
Balamurugan Mahalingam | 5d80641 | 2018-07-30 18:04:15 +0530 | [diff] [blame] | 1102 | hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV, |
| 1103 | &(st->header), hal_soc); |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1104 | |
| 1105 | /* error bit */ |
| 1106 | val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, |
| 1107 | ERROR_DETECTED)]; |
| 1108 | st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED, |
| 1109 | val); |
| 1110 | |
| 1111 | /* block error */ |
| 1112 | val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, |
| 1113 | BLOCK_ERROR_DETAILS)]; |
| 1114 | st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, |
| 1115 | BLOCK_ERROR_DETAILS, |
| 1116 | val); |
| 1117 | if (!st->block_error) |
Venkata Sharath Chandra Manchala | 8e8d8f1 | 2017-01-13 00:00:58 -0800 | [diff] [blame] | 1118 | qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap); |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1119 | |
| 1120 | /* cache flush status */ |
| 1121 | val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, |
| 1122 | CACHE_CONTROLLER_FLUSH_STATUS_HIT)]; |
| 1123 | st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, |
| 1124 | CACHE_CONTROLLER_FLUSH_STATUS_HIT, |
| 1125 | val); |
| 1126 | |
| 1127 | /* cache flush descriptor type */ |
| 1128 | val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, |
| 1129 | CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)]; |
| 1130 | st->cache_flush_status_desc_type = |
| 1131 | HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, |
| 1132 | CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE, |
| 1133 | val); |
| 1134 | |
| 1135 | /* cache flush count */ |
| 1136 | val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, |
| 1137 | CACHE_CONTROLLER_FLUSH_COUNT)]; |
| 1138 | st->cache_flush_cnt = |
| 1139 | HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, |
| 1140 | CACHE_CONTROLLER_FLUSH_COUNT, |
| 1141 | val); |
| 1142 | |
| 1143 | } |
Pratik Gandhi | dc82a77 | 2018-01-30 18:57:05 +0530 | [diff] [blame] | 1144 | qdf_export_symbol(hal_reo_flush_cache_status); |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1145 | |
/**
 * hal_reo_unblock_cache_status() - Parse a REO UNBLOCK_CACHE status TLV
 * @reo_desc: pointer to the status TLV in the REO status ring
 * @soc: HAL SoC handle; on a successful resource-index unblock the
 *	 corresponding bit in soc->reo_res_bitmap is cleared
 * @st: output - parsed unblock-cache status
 */
inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
					 struct hal_soc *soc,
					 struct hal_reo_unblk_cache_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
			&(st->header), soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* unblock type */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				 UNBLOCK_TYPE)];
	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
					 UNBLOCK_TYPE,
					 val);

	/* Successful unblock of a resource index: release the bitmap bit
	 * that hal_reo_flush_cache_status() set */
	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
		qdf_clear_bit(soc->index,
			      (unsigned long *)&soc->reo_res_bitmap);
}
qdf_export_symbol(hal_reo_unblock_cache_status);
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1179 | |
/**
 * hal_reo_flush_timeout_list_status() - Parse a REO FLUSH_TIMEOUT_LIST
 * status TLV
 * @reo_desc: pointer to the status TLV in the REO status ring
 * @st: output - parsed status (header, error bit, list-empty flag,
 *	released descriptor count and forwarded buffer count)
 * @hal_soc: HAL SoC handle, passed through to the header parser
 */
inline void hal_reo_flush_timeout_list_status(
			 uint32_t *reo_desc,
			 struct hal_reo_flush_timeout_list_status *st,
			 struct hal_soc *hal_soc)

{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
			&(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* list empty */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				 TIMOUT_LIST_EMPTY)];
	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				       TIMOUT_LIST_EMPTY,
				       val);

	/* release descriptor count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				 RELEASE_DESC_COUNT)];
	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				       RELEASE_DESC_COUNT,
				       val);

	/* forward buf count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				 FORWARD_BUF_COUNT)];
	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				       FORWARD_BUF_COUNT,
				       val);
}
qdf_export_symbol(hal_reo_flush_timeout_list_status);
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1225 | |
/**
 * hal_reo_desc_thres_reached_status() - Parse a REO descriptor-threshold-
 * reached status TLV
 * @reo_desc: pointer to the status TLV in the REO status ring
 * @st: output - parsed status (header, threshold index and the four link
 *	descriptor counters)
 * @hal_soc: HAL SoC handle, passed through to the header parser
 */
inline void hal_reo_desc_thres_reached_status(
			 uint32_t *reo_desc,
			 struct hal_reo_desc_thres_reached_status *st,
			 struct hal_soc *hal_soc)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(reo_desc,
			      HAL_REO_DESC_THRES_STATUS_TLV,
			      &(st->header), hal_soc);

	/* threshold index */
	val = reo_desc[HAL_OFFSET_DW(
			 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
			 THRESHOLD_INDEX)];
	st->thres_index = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
				THRESHOLD_INDEX,
				val);

	/* link desc counters (counter0..2 plus the sum, one per word) */
	val = reo_desc[HAL_OFFSET_DW(
			 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
			 LINK_DESCRIPTOR_COUNTER0)];
	st->link_desc_counter0 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
				LINK_DESCRIPTOR_COUNTER0,
				val);

	val = reo_desc[HAL_OFFSET_DW(
			 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
			 LINK_DESCRIPTOR_COUNTER1)];
	st->link_desc_counter1 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
				LINK_DESCRIPTOR_COUNTER1,
				val);

	val = reo_desc[HAL_OFFSET_DW(
			 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
			 LINK_DESCRIPTOR_COUNTER2)];
	st->link_desc_counter2 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
				LINK_DESCRIPTOR_COUNTER2,
				val);

	val = reo_desc[HAL_OFFSET_DW(
			 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
			 LINK_DESCRIPTOR_COUNTER_SUM)];
	st->link_desc_counter_sum = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
				LINK_DESCRIPTOR_COUNTER_SUM,
				val);
}
qdf_export_symbol(hal_reo_desc_thres_reached_status);
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1285 | |
| 1286 | inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc, |
Balamurugan Mahalingam | 5d80641 | 2018-07-30 18:04:15 +0530 | [diff] [blame] | 1287 | struct hal_reo_update_rx_queue_status *st, |
| 1288 | struct hal_soc *hal_soc) |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1289 | { |
| 1290 | /* Offsets of descriptor fields defined in HW headers start |
| 1291 | * from the field after TLV header */ |
| 1292 | reo_desc += (sizeof(struct tlv_32_hdr) >> 2); |
| 1293 | |
| 1294 | /* header */ |
Balamurugan Mahalingam | 5d80641 | 2018-07-30 18:04:15 +0530 | [diff] [blame] | 1295 | hal_reo_status_get_header(reo_desc, |
| 1296 | HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV, |
| 1297 | &(st->header), hal_soc); |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1298 | } |
Pratik Gandhi | dc82a77 | 2018-01-30 18:57:05 +0530 | [diff] [blame] | 1299 | qdf_export_symbol(hal_reo_rx_update_queue_status); |
Manoj Ekbote | 4f0c6b1 | 2016-10-30 16:01:38 -0700 | [diff] [blame] | 1300 | |
| 1301 | /** |
| 1302 | * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG |
| 1303 | * with command number |
| 1304 | * @hal_soc: Handle to HAL SoC structure |
| 1305 | * @hal_ring: Handle to HAL SRNG structure |
| 1306 | * |
| 1307 | * Return: none |
| 1308 | */ |
| 1309 | inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng) |
| 1310 | { |
| 1311 | int cmd_num; |
| 1312 | uint32_t *desc_addr; |
| 1313 | struct hal_srng_params srng_params; |
| 1314 | uint32_t desc_size; |
| 1315 | uint32_t num_desc; |
| 1316 | |
| 1317 | hal_get_srng_params(soc, hal_srng, &srng_params); |
| 1318 | |
| 1319 | desc_addr = (uint32_t *)(srng_params.ring_base_vaddr); |
| 1320 | desc_addr += (sizeof(struct tlv_32_hdr) >> 2); |
| 1321 | desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2; |
| 1322 | num_desc = srng_params.num_entries; |
| 1323 | cmd_num = 1; |
| 1324 | while (num_desc) { |
| 1325 | /* Offsets of descriptor fields defined in HW headers start |
| 1326 | * from the field after TLV header */ |
| 1327 | HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0, |
| 1328 | REO_CMD_NUMBER, cmd_num); |
| 1329 | desc_addr += desc_size; |
| 1330 | num_desc--; cmd_num++; |
| 1331 | } |
| 1332 | |
| 1333 | soc->reo_res_bitmap = 0; |
| 1334 | } |
Pratik Gandhi | dc82a77 | 2018-01-30 18:57:05 +0530 | [diff] [blame] | 1335 | qdf_export_symbol(hal_reo_init_cmd_ring); |